blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e7573159e65752f462dbbbfb65d3985f11302c75 | 2ef39fbd5563f0bb7fd8f23126271c08dd150a34 | /conanfile.py | a64e1e02e47b08443b1952726e6e6193997b22ec | [
"Unlicense"
] | permissive | trondhe/cmake_template | 600a57b46379b870c4c639e2051971817f47b391 | 3da9f35eddd33bd3dfb30801d0c297b04470c6ff | refs/heads/master | 2021-06-20T00:21:54.561059 | 2021-03-04T14:50:28 | 2021-03-04T14:50:28 | 193,145,505 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | from conans import ConanFile, CMake, tools
class ConanRecipe(ConanFile):
    """Conan package recipe for the cmake_template project.

    The project sources are captured via the SCM feature from the git
    repository; building and packaging are delegated to CMake.
    """
    name = "cmake_template"
    version = "0.0.1"
    # doctest is only needed to build/run the test suite, not by consumers.
    build_requires = "doctest/2.3.8"
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake_paths"
    # "revision": "auto" records the commit the recipe was exported from.
    scm = {
        "type": "git",
        "url": "https://github.com/trondhe/cmake_template.git",
        "revision": "auto"
    }

    def build(self):
        """Configure and build the project with the CMake helper."""
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        """Run the test suite, then install the built artifacts into the package."""
        cmake = CMake(self)
        cmake.test()
        cmake.install()
| [
"trondhe@gmail.com"
] | trondhe@gmail.com |
b0654c2a2d79501a23167110aa08c91d2f74bc55 | ff99c677aba11e27c252f773b52cd54f5de79279 | /ctt-server/openapi_server/models/test_artifact.py | eb26e77d966f8b9e136f61f7fd8c85e4776ebb27 | [
"Apache-2.0"
] | permissive | radon-h2020/radon-ctt | b7eeb82f59e36e2a258d0a2ba9cd9483eb3dd247 | 97fcf5e800a0129d24e119b430d94f07ca248ba9 | refs/heads/master | 2023-01-04T23:44:49.611599 | 2021-09-15T15:34:41 | 2021-09-15T15:34:41 | 235,379,642 | 0 | 7 | Apache-2.0 | 2022-12-27T15:56:38 | 2020-01-21T15:48:45 | Python | UTF-8 | Python | false | false | 5,920 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class TestArtifact(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, uuid=None, project_uuid=None, sut_tosca_path=None, sut_inputs_path=None, ti_tosca_path=None, ti_inputs_path=None, commit_hash=None):  # noqa: E501
        """TestArtifact - a model defined in OpenAPI

        :param uuid: The uuid of this TestArtifact.  # noqa: E501
        :type uuid: str
        :param project_uuid: The project_uuid of this TestArtifact.  # noqa: E501
        :type project_uuid: str
        :param sut_tosca_path: The sut_tosca_path of this TestArtifact.  # noqa: E501
        :type sut_tosca_path: str
        :param sut_inputs_path: The sut_inputs_path of this TestArtifact.  # noqa: E501
        :type sut_inputs_path: str
        :param ti_tosca_path: The ti_tosca_path of this TestArtifact.  # noqa: E501
        :type ti_tosca_path: str
        :param ti_inputs_path: The ti_inputs_path of this TestArtifact.  # noqa: E501
        :type ti_inputs_path: str
        :param commit_hash: The commit_hash of this TestArtifact.  # noqa: E501
        :type commit_hash: str
        """
        # Attribute name -> expected Python type (used by the generated
        # (de)serialisation helpers in openapi_server.util).
        self.openapi_types = {
            'uuid': str,
            'project_uuid': str,
            'sut_tosca_path': str,
            'sut_inputs_path': str,
            'ti_tosca_path': str,
            'ti_inputs_path': str,
            'commit_hash': str
        }

        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'uuid': 'uuid',
            'project_uuid': 'project_uuid',
            'sut_tosca_path': 'sut_tosca_path',
            'sut_inputs_path': 'sut_inputs_path',
            'ti_tosca_path': 'ti_tosca_path',
            'ti_inputs_path': 'ti_inputs_path',
            'commit_hash': 'commit_hash'
        }

        self._uuid = uuid
        self._project_uuid = project_uuid
        self._sut_tosca_path = sut_tosca_path
        self._sut_inputs_path = sut_inputs_path
        self._ti_tosca_path = ti_tosca_path
        self._ti_inputs_path = ti_inputs_path
        self._commit_hash = commit_hash

    @classmethod
    def from_dict(cls, dikt) -> 'TestArtifact':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The TestArtifact of this TestArtifact.  # noqa: E501
        :rtype: TestArtifact
        """
        return util.deserialize_model(dikt, cls)

    @property
    def uuid(self):
        """Gets the uuid of this TestArtifact.

        :return: The uuid of this TestArtifact.
        :rtype: str
        """
        return self._uuid

    @uuid.setter
    def uuid(self, uuid):
        """Sets the uuid of this TestArtifact.

        :param uuid: The uuid of this TestArtifact.
        :type uuid: str
        """
        self._uuid = uuid

    @property
    def project_uuid(self):
        """Gets the project_uuid of this TestArtifact.

        :return: The project_uuid of this TestArtifact.
        :rtype: str
        """
        return self._project_uuid

    @project_uuid.setter
    def project_uuid(self, project_uuid):
        """Sets the project_uuid of this TestArtifact.

        :param project_uuid: The project_uuid of this TestArtifact.
        :type project_uuid: str
        """
        self._project_uuid = project_uuid

    @property
    def sut_tosca_path(self):
        """Gets the sut_tosca_path of this TestArtifact.

        :return: The sut_tosca_path of this TestArtifact.
        :rtype: str
        """
        return self._sut_tosca_path

    @sut_tosca_path.setter
    def sut_tosca_path(self, sut_tosca_path):
        """Sets the sut_tosca_path of this TestArtifact.

        :param sut_tosca_path: The sut_tosca_path of this TestArtifact.
        :type sut_tosca_path: str
        """
        self._sut_tosca_path = sut_tosca_path

    @property
    def sut_inputs_path(self):
        """Gets the sut_inputs_path of this TestArtifact.

        :return: The sut_inputs_path of this TestArtifact.
        :rtype: str
        """
        return self._sut_inputs_path

    @sut_inputs_path.setter
    def sut_inputs_path(self, sut_inputs_path):
        """Sets the sut_inputs_path of this TestArtifact.

        :param sut_inputs_path: The sut_inputs_path of this TestArtifact.
        :type sut_inputs_path: str
        """
        self._sut_inputs_path = sut_inputs_path

    @property
    def ti_tosca_path(self):
        """Gets the ti_tosca_path of this TestArtifact.

        :return: The ti_tosca_path of this TestArtifact.
        :rtype: str
        """
        return self._ti_tosca_path

    @ti_tosca_path.setter
    def ti_tosca_path(self, ti_tosca_path):
        """Sets the ti_tosca_path of this TestArtifact.

        :param ti_tosca_path: The ti_tosca_path of this TestArtifact.
        :type ti_tosca_path: str
        """
        self._ti_tosca_path = ti_tosca_path

    @property
    def ti_inputs_path(self):
        """Gets the ti_inputs_path of this TestArtifact.

        :return: The ti_inputs_path of this TestArtifact.
        :rtype: str
        """
        return self._ti_inputs_path

    @ti_inputs_path.setter
    def ti_inputs_path(self, ti_inputs_path):
        """Sets the ti_inputs_path of this TestArtifact.

        :param ti_inputs_path: The ti_inputs_path of this TestArtifact.
        :type ti_inputs_path: str
        """
        self._ti_inputs_path = ti_inputs_path

    @property
    def commit_hash(self):
        """Gets the commit_hash of this TestArtifact.

        :return: The commit_hash of this TestArtifact.
        :rtype: str
        """
        return self._commit_hash

    @commit_hash.setter
    def commit_hash(self, commit_hash):
        """Sets the commit_hash of this TestArtifact.

        :param commit_hash: The commit_hash of this TestArtifact.
        :type commit_hash: str
        """
        self._commit_hash = commit_hash
| [
"duellmann@iste.uni-stuttgart.de"
] | duellmann@iste.uni-stuttgart.de |
55172d74274a5029a46af2e63ad7bc84ef53715a | d2bd1931ae47ded0ee016446aa1a7cfd8bd8c2ca | /examples/async_reconnecting_ssl_client.py | 466e7e09da8cc61a2197f49c2c2d73241e855424 | [
"BSD-2-Clause"
] | permissive | odidev/pycapnp | b16cfb1e5add27a2cbfda887eaf04185830a9c43 | 6aa59476a021cef601b0a7193a8bc7573aad31c0 | refs/heads/master | 2023-07-13T03:30:16.545614 | 2021-06-08T04:30:48 | 2021-06-08T04:30:48 | 399,744,596 | 0 | 0 | BSD-2-Clause | 2021-08-25T10:34:18 | 2021-08-25T08:25:25 | null | UTF-8 | Python | false | false | 4,743 | py | #!/usr/bin/env python3
import asyncio
import argparse
import os
import time
import socket
import ssl
import capnp
import thread_capnp
# Directory containing this script; used to locate the self-signed TLS cert.
this_dir = os.path.dirname(os.path.abspath(__file__))

# NOTE(review): swaps capnp's default event loop for a threaded one —
# presumably required so the KJ loop can coexist with asyncio; confirm
# against the pycapnp documentation.
capnp.remove_event_loop()
capnp.create_event_loop(threaded=True)
def parse_args():
    """Parse command-line arguments; expects one positional HOST:PORT value."""
    parser = argparse.ArgumentParser(usage='Connects to the Example thread server \
at the given address and does some RPCs')
    parser.add_argument("host", help="HOST:PORT")
    return parser.parse_args()
class StatusSubscriber(thread_capnp.Example.StatusSubscriber.Server):
    '''An implementation of the StatusSubscriber interface'''

    def status(self, value, **kwargs):
        # Invoked by the server on each status update; just log the time.
        print('status: {}'.format(time.time()))
async def myreader(client, reader):
    """Pump bytes from the asyncio socket reader into the capnp client."""
    while True:
        try:
            # Must be a wait_for in order to give watch_connection a slot
            # to try again
            data = await asyncio.wait_for(reader.read(4096), timeout=1.0)
        except asyncio.TimeoutError:
            continue
        client.write(data)
async def mywriter(client, writer):
    """Pump bytes from the capnp client out to the asyncio socket writer."""
    while True:
        try:
            # Must be a wait_for in order to give watch_connection a slot
            # to try again
            data = await asyncio.wait_for(client.read(4096), timeout=1.0)
            writer.write(data.tobytes())
        except asyncio.TimeoutError:
            continue
async def watch_connection(cap):
    """Periodically verify the RPC connection; stop the loop if it hangs."""
    while True:
        try:
            await asyncio.wait_for(cap.alive().a_wait(), timeout=5)
            await asyncio.sleep(1)
        except asyncio.TimeoutError:
            print("Watch timeout!")
            # Stopping the loop makes run_until_complete raise RuntimeError,
            # which the __main__ retry loop uses to trigger a reconnect.
            asyncio.get_running_loop().stop()
            return False
async def background(cap):
    """Register a StatusSubscriber with the server and await the subscription."""
    subscriber = StatusSubscriber()
    promise = cap.subscribeStatus(subscriber)
    await promise.a_wait()
async def main(host):
    """Connect to HOST:PORT over TLS, run demo RPC calls, then clean up.

    Returns True on normal completion, False if no connection could be
    established (the __main__ loop retries on False).
    """
    host = host.split(':')
    addr = host[0]
    port = host[1]

    # Setup SSL context
    ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=os.path.join(this_dir, 'selfsigned.cert'))

    # Handle both IPv4 and IPv6 cases
    try:
        print("Try IPv4")
        reader, writer = await asyncio.open_connection(
            addr, port,
            ssl=ctx,
            family=socket.AF_INET
        )
    except OSError:
        print("Try IPv6")
        try:
            reader, writer = await asyncio.open_connection(
                addr, port,
                ssl=ctx,
                family=socket.AF_INET6
            )
        except OSError:
            return False

    # Start TwoPartyClient using TwoWayPipe (takes no arguments in this mode)
    client = capnp.TwoPartyClient()
    cap = client.bootstrap().cast_as(thread_capnp.Example)

    # Start watcher to restart socket connection if it is lost
    overalltasks = []
    watcher = [watch_connection(cap)]
    overalltasks.append(asyncio.gather(*watcher, return_exceptions=True))

    # Assemble reader and writer tasks, run in the background
    coroutines = [myreader(client, reader), mywriter(client, writer)]
    overalltasks.append(asyncio.gather(*coroutines, return_exceptions=True))

    # Start background task for subscriber
    tasks = [background(cap)]
    overalltasks.append(asyncio.gather(*tasks, return_exceptions=True))

    # Run blocking tasks
    print('main: {}'.format(time.time()))
    await cap.longRunning().a_wait()
    print('main: {}'.format(time.time()))
    await cap.longRunning().a_wait()
    print('main: {}'.format(time.time()))
    await cap.longRunning().a_wait()
    print('main: {}'.format(time.time()))

    # Cancel the background reader/writer/watcher tasks before returning.
    for task in overalltasks:
        task.cancel()
    return True
if __name__ == '__main__':
    # Using asyncio.run hits an asyncio ssl bug
    # https://bugs.python.org/issue36709
    # asyncio.run(main(parse_args().host), loop=loop, debug=True)
    retry = True
    while retry:
        # A fresh event loop per attempt: the previous one may have been
        # stopped by watch_connection.
        loop = asyncio.new_event_loop()
        try:
            retry = not loop.run_until_complete(main(parse_args().host))
        except RuntimeError:
            # If an IO is hung, the event loop will be stopped
            # and will throw RuntimeError exception
            continue
        if retry:
            time.sleep(1)
            print("Retrying...")

# How this works
# - There are two retry mechanisms
#   1. Connection retry
#   2. alive RPC verification
# - The connection retry just loops the connection (IPv4+IPv6 until there is a connection or Ctrl+C)
# - The alive RPC verification attempts a very basic rpc call with a timeout
#   * If there is a timeout, stop the current event loop
#   * Use the RuntimeError exception to force a reconnect
#   * myreader and mywriter must also be wrapped in wait_for in order for the events to get triggered correctly
| [
"haata@kiibohd.com"
] | haata@kiibohd.com |
aa2912c2ef41acedbaee3ebac7e817473026f59f | 75ed582edd3f1ec38a93dfe366a74a335c28df53 | /tsbe-2018fs/scri/session3/aufgabe_kantone.py | df4c086cbb4475fa16589f2b8d2411535c2322bf | [] | no_license | smlz/smlz.github.io | 9bec54265cafaa84d33bfb2069a48bb51dedc4ad | de49cc0b6986e031aa702c044c26fa9b234d3fef | refs/heads/master | 2020-12-20T03:28:13.770377 | 2019-05-01T20:04:22 | 2019-05-01T20:04:22 | 30,535,346 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 315 | py |
# Read a Swiss canton abbreviation (e.g. "GR") from the user.
# NOTE(review): the lookup/response behaviour sketched in the sample session
# below appears not to be implemented in this file yet.
kuerzel = input("Geben sie ein Kantonskürzel ein: ")

# Example session (prompts and answers are in German):
# Beispielsession:
# Geben sie ein Kantonskürzel ein: GR
# Der Hauptort ders Kantons Graubünden ist Chur.
# Geben sie ein Kantonskürzel ein: AB
# Fehler: Kein Kanton mit dem Kürzel 'AB' gefunden
# Geben sie ein Kantonskürzel ein: q
# Auf wiedersehen...
"marco@schess.ch"
] | marco@schess.ch |
c0a784d30dad0c513d92a9c68106968303511b43 | 25bfbef581c596efc5e4fa45e00302226c7d4174 | /Baek_2231.py | 8a6bcca2fceae73b83ab6205537d8afacd37ac0f | [] | no_license | rukipgus/Algorithm | 1077afee5d829d0db46dd14ee40b42ca175d1922 | 30faa2f2ee62dc5a5c8a3e3c0ebfb67f09c10bbd | refs/heads/master | 2020-07-30T04:10:54.313824 | 2020-05-21T05:08:26 | 2020-05-21T05:08:26 | 210,080,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | a = int(input())
b = 0
# Find the smallest "generator" i of a: an i whose digit sum plus i equals a.
# Prints 0 when no generator exists (appears to be Baekjoon problem 2231,
# per the filename — TODO confirm).
for i in range(a):
    if sum(map(int, list(str(i)))) + i == a:
        b = i
        break
print(b)
"noreply@github.com"
] | noreply@github.com |
5ca72c6c700407fd220a82ddff1badf68a2fed1c | d06f858599b9ba34dbaf376275188c3cd90ccd79 | /ex1b_logreg.py | ae8e9304d414eaa4992d08576e4eb502a77df208 | [] | no_license | pprivulet/UFLDL | 950eaf3dc683e4d426475491d746c93830d85ee8 | 0725c1badd41d45e16a1a48e4b15496530775c08 | refs/heads/master | 2021-01-21T16:52:44.120723 | 2016-02-24T01:51:08 | 2016-02-24T01:51:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | from scipy.optimize import fmin_cg
import numpy as np
from load_mnist import *
def sigmoid(z):
    """Element-wise logistic sigmoid 1 / (1 + e**(-z)).

    :param z: scalar or numpy array of logits.
    :return: value(s) in (0, 1), same shape as z.
    """
    # np.exp is the idiomatic (and more accurate) way to compute e**x,
    # replacing the roundabout np.power(np.e, -z).
    return 1.0 / (1 + np.exp(-z))
def gradient(theta, X, y):
    """Gradient of the logistic-regression cost with respect to theta.

    :param theta: weight vector (flat or (1, n)).
    :param X: design matrix of shape (m, n).
    :param y: label column vector of shape (m, 1).
    :return: flattened gradient of length n.
    """
    m = X.shape[0]
    predictions = sigmoid(X.dot(theta.T)).reshape(m, 1)
    # Average residual per example, as a column vector.
    residuals = 1.0 / m * (predictions - y).reshape(m, 1)
    return X.T.dot(residuals).flatten()
def cost_regression(theta, X, y):
    """Mean cross-entropy (negative log-likelihood) cost for logistic regression.

    :param theta: weight vector (flat or (1, n)).
    :param X: design matrix of shape (m, n).
    :param y: label column vector of shape (m, 1) with entries in {0, 1}.
    :return: scalar cost.
    """
    m = X.shape[0]
    predictions = sigmoid(X.dot(theta.T)).reshape(m, 1)
    log_likelihood = y.T.dot(np.log(predictions)) + (1 - y.T).dot(np.log(1 - predictions))
    return 1.0 / m * -log_likelihood.sum()
def logistic_regression(X, y):
    """Fit logistic-regression weights with nonlinear conjugate gradient.

    A bias column of ones is prepended to X, so the returned theta has
    X.shape[1] + 1 entries.

    :param X: training matrix of shape (m, n).
    :param y: label column vector of shape (m, 1).
    :return: optimised weight vector (flat array from fmin_cg).
    """
    m, n = X.shape
    X = np.hstack((np.ones((m, 1)), X))
    m, n = X.shape
    # NOTE(review): unseeded random initialisation makes results
    # non-deterministic across runs; seed np.random for reproducibility.
    theta = np.random.random(n).reshape(1, n)
    res = fmin_cg(cost_regression, theta, fprime=gradient,
                  args=(X, y), maxiter=200, disp=True)
    theta = res
    # print('Theta: %s' % str(theta))
    return theta
def get_accuracy(theta, test_X, test_y):
    """Fraction of test examples whose thresholded prediction matches test_y.

    :param theta: fitted weights including the bias term.
    :param test_X: test matrix of shape (m, n); a bias column is prepended.
    :param test_y: label column vector of shape (m, 1).
    :return: accuracy in [0, 1].
    """
    num_examples, _ = test_X.shape
    features = np.hstack((np.ones((num_examples, 1)), test_X))
    probabilities = sigmoid(features.dot(theta.T)).reshape(num_examples, 1)
    # A prediction counts as correct when (label == 1) agrees with (p > 0.5).
    matches = (test_y == 1) == (probabilities > 0.5)
    return matches.sum(dtype=np.float32) / num_examples
# Paths to the raw MNIST data files (IDX format).
train_images = 'data/common/train-images-idx3-ubyte'
train_labels = 'data/common/train-labels-idx1-ubyte'
test_images = 'data/common/t10k-images-idx3-ubyte'
test_labels = 'data/common/t10k-labels-idx1-ubyte'

# prepare_data presumably comes from the `from load_mnist import *`
# star import above — TODO confirm against load_mnist.py.
print('Loading train data...')
train_X, train_y = prepare_data(train_images, train_labels)
print('Loading test data...')
test_X, test_y = prepare_data(test_images, test_labels)

# Train on the full training set, then report test-set accuracy.
theta = logistic_regression(train_X, train_y)
accuracy = get_accuracy(theta, test_X, test_y)
print('Accuracy: %2.1f' % (100 * accuracy))
| [
"mehrshad.shams@gmail.com"
] | mehrshad.shams@gmail.com |
2d661f3c74df52da90897d2e5452fbd1684e8bd0 | 6c1e361d39444d50e3e0b69bcc28edc6cd770744 | /tango_with_django_project/rango/migrations/0007_userprofile.py | 5315d0185fee73317cd1ccdf80d7a65c99dd2a80 | [] | no_license | CoffeeDrinker-22302/WAD2 | 40e3d7a04e8cd2a3b5220fbc70e187cad9421acb | b233a468de121123a3efb1adb0f07c05dcf980f4 | refs/heads/master | 2021-01-11T21:13:26.410625 | 2017-02-09T21:47:09 | 2017-02-09T21:47:09 | 79,270,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 19:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the rango.UserProfile model,
    linked one-to-one to the project's user model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('rango', '0006_auto_20170129_2323'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('website', models.URLField(blank=True)),
                ('picture', models.ImageField(blank=True, upload_to='profile_images')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"2301714T@student.gla.ac.uk"
] | 2301714T@student.gla.ac.uk |
df7a9c0e24ac326304c81886a08987ad4ec9854a | b7b1608203e018cf258826ff92c3234db896b33c | /UI1/Old/searchwindow.py | b621efb8fca096ad31e187c166d8b6fa715d4777 | [
"MIT"
] | permissive | UnclassedPenguin/AnimalTracker | 254a86090a35f6a16adaa3d3f1e216d6c9a44751 | ade8c55d3c9617e3a4013af9dcaefcbcc255f9fe | refs/heads/master | 2021-12-14T11:03:30.052075 | 2021-12-07T20:40:58 | 2021-12-07T20:40:58 | 159,107,460 | 1 | 0 | null | 2021-12-07T19:58:44 | 2018-11-26T03:53:32 | Python | UTF-8 | Python | false | false | 15,412 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'searchwindow3.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) UI class for the Advanced Search window.

    Per the generator header, do not edit manually — regenerate from the
    .ui file instead.
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on MainWindow's grid layout."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(616, 339)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.label_8 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_8.setFont(font)
        self.label_8.setAlignment(QtCore.Qt.AlignCenter)
        self.label_8.setObjectName("label_8")
        self.gridLayout.addWidget(self.label_8, 3, 7, 1, 3)
        self.label_5 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_5.setFont(font)
        self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 6, 1, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_3.setFont(font)
        self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_3.setObjectName("label_3")
        self.gridLayout.addWidget(self.label_3, 5, 1, 1, 1)
        self.tableBox = QtWidgets.QComboBox(self.centralwidget)
        self.tableBox.setObjectName("tableBox")
        self.gridLayout.addWidget(self.tableBox, 5, 7, 1, 3)
        self.orderbyBox = QtWidgets.QComboBox(self.centralwidget)
        self.orderbyBox.setObjectName("orderbyBox")
        self.gridLayout.addWidget(self.orderbyBox, 8, 7, 1, 3)
        self.distinctcbButton = QtWidgets.QCheckBox(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.distinctcbButton.setFont(font)
        self.distinctcbButton.setObjectName("distinctcbButton")
        self.gridLayout.addWidget(self.distinctcbButton, 6, 7, 1, 1)
        self.orderby2Box = QtWidgets.QComboBox(self.centralwidget)
        self.orderby2Box.setObjectName("orderby2Box")
        self.gridLayout.addWidget(self.orderby2Box, 11, 7, 1, 3)
        self.datecbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.datecbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.datecbButton.setFont(font)
        self.datecbButton.setObjectName("datecbButton")
        self.gridLayout.addWidget(self.datecbButton, 11, 2, 1, 1)
        self.toggleallButton = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.toggleallButton.setFont(font)
        self.toggleallButton.setObjectName("toggleallButton")
        self.gridLayout.addWidget(self.toggleallButton, 10, 1, 1, 1)
        self.mothercbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.mothercbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.mothercbButton.setFont(font)
        self.mothercbButton.setObjectName("mothercbButton")
        self.gridLayout.addWidget(self.mothercbButton, 13, 2, 1, 1)
        self.id2Entry = QtWidgets.QLineEdit(self.centralwidget)
        self.id2Entry.setObjectName("id2Entry")
        self.gridLayout.addWidget(self.id2Entry, 7, 2, 1, 2)
        self.name2Entry = QtWidgets.QLineEdit(self.centralwidget)
        self.name2Entry.setObjectName("name2Entry")
        self.gridLayout.addWidget(self.name2Entry, 8, 2, 1, 2)
        self.label_6 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 7, 1, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_4.setFont(font)
        self.label_4.setAlignment(QtCore.Qt.AlignCenter)
        self.label_4.setObjectName("label_4")
        self.gridLayout.addWidget(self.label_4, 7, 7, 1, 3)
        self.group2Box = QtWidgets.QComboBox(self.centralwidget)
        self.group2Box.setObjectName("group2Box")
        self.gridLayout.addWidget(self.group2Box, 5, 2, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_7.setFont(font)
        self.label_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 8, 1, 1, 1)
        self.hoofconcbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.hoofconcbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.hoofconcbButton.setFont(font)
        self.hoofconcbButton.setObjectName("hoofconcbButton")
        self.gridLayout.addWidget(self.hoofconcbButton, 14, 3, 1, 1)
        self.label_9 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_9.setFont(font)
        self.label_9.setAlignment(QtCore.Qt.AlignCenter)
        self.label_9.setObjectName("label_9")
        self.gridLayout.addWidget(self.label_9, 10, 7, 1, 3)
        self.rfidcbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.rfidcbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.rfidcbButton.setFont(font)
        self.rfidcbButton.setObjectName("rfidcbButton")
        self.gridLayout.addWidget(self.rfidcbButton, 12, 1, 1, 1)
        self.namecbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.namecbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.namecbButton.setFont(font)
        # "Name" is the only search column checked by default.
        self.namecbButton.setChecked(True)
        self.namecbButton.setObjectName("namecbButton")
        self.gridLayout.addWidget(self.namecbButton, 11, 1, 1, 1)
        self.fathercbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.fathercbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.fathercbButton.setFont(font)
        self.fathercbButton.setObjectName("fathercbButton")
        self.gridLayout.addWidget(self.fathercbButton, 14, 2, 1, 1)
        self.idcbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.idcbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.idcbButton.setFont(font)
        self.idcbButton.setObjectName("idcbButton")
        self.gridLayout.addWidget(self.idcbButton, 13, 1, 1, 1)
        self.bdaycbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.bdaycbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.bdaycbButton.setFont(font)
        self.bdaycbButton.setObjectName("bdaycbButton")
        self.gridLayout.addWidget(self.bdaycbButton, 12, 2, 1, 1)
        self.groupcbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.groupcbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.groupcbButton.setFont(font)
        self.groupcbButton.setObjectName("groupcbButton")
        self.gridLayout.addWidget(self.groupcbButton, 14, 1, 1, 1)
        self.soldcbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.soldcbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.soldcbButton.setFont(font)
        self.soldcbButton.setObjectName("soldcbButton")
        self.gridLayout.addWidget(self.soldcbButton, 12, 5, 1, 1)
        self.rfid2Entry = QtWidgets.QLineEdit(self.centralwidget)
        self.rfid2Entry.setObjectName("rfid2Entry")
        self.gridLayout.addWidget(self.rfid2Entry, 6, 2, 1, 2)
        self.notescbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.notescbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.notescbButton.setFont(font)
        self.notescbButton.setObjectName("notescbButton")
        self.gridLayout.addWidget(self.notescbButton, 11, 5, 1, 1)
        self.weightcbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.weightcbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.weightcbButton.setFont(font)
        self.weightcbButton.setObjectName("weightcbButton")
        self.gridLayout.addWidget(self.weightcbButton, 12, 3, 1, 1)
        self.gendercbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.gendercbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.gendercbButton.setFont(font)
        self.gendercbButton.setObjectName("gendercbButton")
        self.gridLayout.addWidget(self.gendercbButton, 11, 3, 1, 1)
        self.bodyconcbButton = QtWidgets.QCheckBox(self.centralwidget)
        self.bodyconcbButton.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.bodyconcbButton.setFont(font)
        self.bodyconcbButton.setObjectName("bodyconcbButton")
        self.gridLayout.addWidget(self.bodyconcbButton, 13, 3, 1, 1)
        self.genealogyButton = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.genealogyButton.setFont(font)
        self.genealogyButton.setObjectName("genealogyButton")
        self.gridLayout.addWidget(self.genealogyButton, 13, 5, 1, 1)
        self.testButton = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.testButton.setFont(font)
        self.testButton.setObjectName("testButton")
        self.gridLayout.addWidget(self.testButton, 14, 5, 1, 1)
        self.search2Button = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.search2Button.setFont(font)
        self.search2Button.setObjectName("search2Button")
        self.gridLayout.addWidget(self.search2Button, 13, 7, 1, 1)
        self.save2Button = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.save2Button.setFont(font)
        self.save2Button.setObjectName("save2Button")
        self.gridLayout.addWidget(self.save2Button, 14, 7, 1, 1)
        self.clear2Button = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.clear2Button.setFont(font)
        self.clear2Button.setObjectName("clear2Button")
        self.gridLayout.addWidget(self.clear2Button, 13, 8, 1, 1)
        self.quit2Button = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.quit2Button.setFont(font)
        self.quit2Button.setObjectName("quit2Button")
        self.gridLayout.addWidget(self.quit2Button, 14, 8, 1, 1)
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setMaximumSize(QtCore.QSize(16777215, 30))
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 3, 1, 1, 3)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply the translatable text for every widget."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Animal Tracker - Advanced Search"))
        self.label_8.setText(_translate("MainWindow", "Table:"))
        self.label_5.setText(_translate("MainWindow", "RFID:"))
        self.label_3.setText(_translate("MainWindow", "Group:"))
        self.distinctcbButton.setText(_translate("MainWindow", "Distinct"))
        self.datecbButton.setText(_translate("MainWindow", "Date"))
        self.toggleallButton.setText(_translate("MainWindow", "Select All"))
        self.mothercbButton.setText(_translate("MainWindow", "Mother"))
        self.label_6.setText(_translate("MainWindow", "ID:"))
        self.label_4.setText(_translate("MainWindow", "Order By:"))
        self.label_7.setText(_translate("MainWindow", "Name:"))
        self.hoofconcbButton.setText(_translate("MainWindow", "Hoof Cond."))
        self.label_9.setText(_translate("MainWindow", "Order By 2:"))
        self.rfidcbButton.setText(_translate("MainWindow", "RFID"))
        self.namecbButton.setText(_translate("MainWindow", "Name"))
        self.fathercbButton.setText(_translate("MainWindow", "Father"))
        self.idcbButton.setText(_translate("MainWindow", "ID"))
        self.bdaycbButton.setText(_translate("MainWindow", "Birthday"))
        self.groupcbButton.setText(_translate("MainWindow", "Group"))
        self.soldcbButton.setText(_translate("MainWindow", "Sold"))
        self.notescbButton.setText(_translate("MainWindow", "Notes"))
        self.weightcbButton.setText(_translate("MainWindow", "Weight"))
        self.gendercbButton.setText(_translate("MainWindow", "Gender"))
        self.bodyconcbButton.setText(_translate("MainWindow", "Body Cond."))
        self.genealogyButton.setText(_translate("MainWindow", "Genealogy"))
        self.testButton.setText(_translate("MainWindow", "Test"))
        self.search2Button.setText(_translate("MainWindow", "Search"))
        self.save2Button.setText(_translate("MainWindow", "Save"))
        self.clear2Button.setText(_translate("MainWindow", "Clear"))
        self.quit2Button.setText(_translate("MainWindow", "Close"))
        self.label.setText(_translate("MainWindow", "Advanced Search"))
| [
"tvanden@live.com"
] | tvanden@live.com |
9e659d3dac0f1d116cf3bf103dea84c8451b82e1 | 6bdccbb5c387629078dba012f0bf325848631916 | /a1_classes.py | 995bc6cff683a2d41862aaefb23a987aaf3af6f4 | [] | no_license | jc527847/A2 | 97716d7be8445d5053b888f7609e0652e90cb092 | 9e514481c005e4d93a7a2035aa23cfc44be79975 | refs/heads/master | 2022-12-21T11:00:06.142266 | 2020-09-27T12:28:58 | 2020-09-27T12:28:58 | 299,025,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,261 | py | from placecollection import PlaceCollection
# Menu text printed before every prompt in menu().
MENU_STRING = "\nL - List places\nA - Add new place\nM - Mark a place as visited\nQ - Quit\n"

# Single shared collection used by all of the menu handlers below.
place_collection = PlaceCollection()
def main():
    """Program entry point: load saved places, greet the user, run the menu."""
    place_collection.load_places()
    print("Places To Visit 1.0 - by <Chen chen>")
    menu()
def place_list():
    """Print every place, numbering them and flagging unvisited ones with '*'."""
    for number, place in enumerate(place_collection.places, start=1):
        marker = ".*" if place.status == "u" else "."
        print("{}{} {}".format(number, marker, place))
    print('{} places visited, {} places still to visit'.format(
        place_collection.visited_places_count(),
        place_collection.unvisited_places_count()))
def visit_place(_menu_choice=None):
    """Prompt for a place number and mark that place as visited.

    The optional positional argument is accepted (and ignored) for
    compatibility with callers that pass the current menu choice; without
    it, menu()'s call visit_place(choice) raised a TypeError.
    """
    total = place_collection.visited_places_count() + place_collection.unvisited_places_count()
    print("Enter the number of a place to mark as visited")
    # Error checking: keep prompting until a valid, in-range number is given.
    # (The original "Error checking" marker was a no-op bare string.)
    finished = False
    while not finished:
        try:
            choice = int(input(">>> "))
            if 0 < choice <= total:
                # Hoist the repeated select_place() lookup.
                place = place_collection.select_place(choice)
                # Check if the place is already visited.
                if place.status == "v":
                    print("You have already visited")
                else:
                    print("{} from {} visited".format(place.city, place.country))
                    place.status = "v"
                finished = True
            elif choice > total:
                print("Invalid place number")
            else:
                print("Number must be > 0")
        except ValueError:
            print("Invalid input; enter a valid number")
def add_place():
    """Prompt for a new place's city, country and priority, then report it.

    NOTE(review): the collected values are only printed; nothing is appended
    to place_collection here -- confirm whether the actual add happens
    elsewhere.
    """
    city = input("city:")
    # Bug fix: this loop previously tested the undefined name `title`, which
    # raised NameError whenever it was reached.
    while not city.strip():
        print("Input can not be blank")
        city = input("City:")
    # NOTE(review): "Country" is parsed as a non-negative integer here, which
    # looks odd for a country -- confirm the intended meaning of this field.
    country = ""
    finished = False
    while not finished:
        try:
            country = int(input("Country:"))
            if country >= 0:
                finished = True
            else:
                print("Number must be >= 0")
        except ValueError:
            print("Invalid input; enter a valid number")
    priority = input("Priority:")
    while not priority.strip():
        print("Input can not be blank")
        priority = input("Priority:")
    print("{} ({} from {}) added to place list".format(city, country, priority))
def menu():
    """Run the interactive command loop until the user enters Q."""
    print("Menu:")
    print(MENU_STRING)
    choice = input(">>> ").upper()
    while choice != "Q":
        if choice == "L":
            # Bug fix: previously called the undefined movie_list().
            place_list()
        elif choice == "A":
            # Bug fix: previously called the undefined add_movie().
            add_place()
        elif choice == "M":
            # Bug fix: visit_place() takes no arguments -- it prompts for the
            # place number itself; passing `choice` raised TypeError.
            visit_place()
        else:
            print("Invalid choice")
        print("Menu:")
        print(MENU_STRING)
        choice = input(">>> ").upper()
    # Bug fix: PlaceCollection exposes visited_places_count(); the previous
    # watched_places_count() call raised AttributeError on quit.
    # NOTE(review): nothing in this chunk actually writes place.csv -- confirm
    # a save call is not missing here.
    print("{} places saved to place.csv".format(
        place_collection.unvisited_places_count() + place_collection.visited_places_count()))
    print("Have a good time")
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
9238735c616b761d46ddac069fc56687c03ed507 | 26de9d17042ff75b46c041ee3000fb29aed19197 | /ITinformation/apps/job/migrations/0004_auto_20200320_1719.py | dd3d3ea0f520634a1a4dcf4fd0c3d0f7a5e5e9f0 | [] | no_license | cdlsy/ITinfo | b8ba9c28c4435474184f085942d3be54d5c9c00e | 22bc7374ebec85e952378f213806ca90874396a5 | refs/heads/master | 2021-05-21T16:43:38.571020 | 2020-04-03T12:24:43 | 2020-04-03T12:24:43 | 252,721,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,147 | py | # Generated by Django 2.2 on 2020-03-20 09:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `job` app.

    Adds JobsSKU.fromsite and relaxes/renames metadata on the existing JobsSKU
    fields.  Generated by `makemigrations`; keep it byte-stable so the recorded
    migration history stays consistent -- do not hand-edit field definitions.
    """
    dependencies = [
        ('job', '0003_auto_20200320_1702'),
    ]
    operations = [
        # New column recording which site the job posting was scraped from.
        migrations.AddField(
            model_name='jobssku',
            name='fromsite',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='数据来源'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='Size',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='单位规模'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='city',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='岗位地点'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='company',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='岗位单位'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='desc',
            field=models.CharField(blank=True, max_length=1256, null=True, verbose_name='岗位简介'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='education',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='教育程度'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='financeStage',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='单位类型'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='industryField',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='行业领域'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='name',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='岗位名称'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='number',
            field=models.IntegerField(blank=True, default=0, null=True, verbose_name='招聘人数'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='salary',
            field=models.IntegerField(blank=True, null=True, verbose_name='职薪'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='status',
            field=models.SmallIntegerField(blank=True, choices=[(0, '下线'), (1, '上线')], default=1, null=True, verbose_name='岗位状态'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='stock',
            field=models.IntegerField(blank=True, default=1, null=True, verbose_name='岗位库存'),
        ),
        migrations.AlterField(
            model_name='jobssku',
            name='workYear',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='岗位年限'),
        ),
    ]
| [
"962082877@qq.com"
] | 962082877@qq.com |
a29b5418c26e275543f353c8cef2bfca38fca308 | 0351c5c97c15b55549a3c6f766ecdb1746aad152 | /venv/bin/django-admin | c2224dcb57ca979c2d90b034a63cd0e41746a0d7 | [] | no_license | mradulovic988/shop | a52712cc84bb1c164c655122dffdb091cb3bd65e | 86d406bda8ec0cd63fc7beef4a94375e4671e339 | refs/heads/master | 2022-11-13T07:31:38.680293 | 2020-06-22T11:44:45 | 2020-06-22T11:44:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | #!/home/marko/PycharmProjects/PyShop/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"upss070288@gmail.com"
] | upss070288@gmail.com | |
# Bug fix: the loop header ended with a semicolon ("for i in range (3);"),
# which is a SyntaxError in Python; a colon introduces the loop body.
for _ in range(3):
    print("segev")
| [
"segev19@meet.mit.edu"
] | segev19@meet.mit.edu |
d5d8dd821a4ecfe8fd68e2dd4fdffdced4869e0f | f7df44ab53391592d438d4955752e655fc4d80aa | /login_xyz.py | 81a104b0784f66a3e009b102b38024c0b8c50c94 | [] | no_license | aaliyah-jardien/mysql-login | 2dccee71430220815ea24dc2c77633e28a4b059d | 520b97f44560a5fd695be2f2b5a26a2dd3493262 | refs/heads/main | 2023-06-07T19:41:25.607720 | 2021-06-29T11:06:56 | 2021-06-29T11:06:56 | 381,334,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | from tkinter import *
from tkinter import messagebox
# WINDOW FEATURES -- fixed-size, non-resizable 500x250 login window.
window = Tk()
window.geometry("500x250")
window.title("Login page")
window.config(bg="silver")
window.resizable(0, 0)
# LABELS -- static prompts placed with absolute coordinates.
name_lab = Label(window, text="Please enter username :", font="arial 13 bold italic", bg="lime")
name_lab.place(x=30, y=30)
pass_lab = Label(window, text="Please enter password: ", font="arial 13 bold italic", bg="lime")
pass_lab.place(x=30, y=70)
# ENTRIES -- password entry masks its characters with '*'.
name_ent = Entry(window, bg="lime")
name_ent.place(x=280, y=30)
pass_ent = Entry(window, bg="lime", show="*")
pass_ent.place(x=280, y=70)
# NOTE(review): a credential-looking comment was committed here; it has been
# removed -- rotate that credential if it was real.
# FUNCTIONS
def clear_func():
    """Ask for confirmation, then clear both entry fields."""
    sure = messagebox.askyesno(title="Alert", message="Are you sure you want to clear your information?")
    if sure:
        name_ent.delete(0, END)
        pass_ent.delete(0, END)
    else:
        return None
# def login_func():  -- login handler is not implemented yet.
# BUTTONS -- Login/Register have command="" (no-op); only Clear is wired up.
clear_btn = Button(window, text="Clear", command=clear_func, font="arial 12 bold", bg="lime", border="5")
clear_btn.place(x=30, y=140)
login_btn = Button(window, text="Login", command="", font="arial 12 bold", bg="lime", border="5")
login_btn.place(x=200, y=140)
regis_btn = Button(window, text="Register", command="", font="arial 12 bold", bg="lime", border="5")
regis_btn.place(x=340, y=140)
# FIN -- blocks here until the window is closed.
window.mainloop()
| [
"aaliyahjar13@gmail.com"
] | aaliyahjar13@gmail.com |
b9071b7473d0802dc1743dd7203c33543df399ac | eb479fcf4486dc88d43253cad4f041531130b1f2 | /main.py | 0c800970affb21d7876cb3f3f9682e14bda448c7 | [] | no_license | stephaniecallejas/Lab-4 | 147dc1efb8c3b7936d8399aa9767cc6378f66004 | ff5a2b0554678f93567c6ec556e0fff16e8791f6 | refs/heads/master | 2020-04-06T08:19:37.107582 | 2018-11-28T17:22:39 | 2018-11-28T17:22:39 | 157,301,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | # Coded by Stephanie Callejas
# Last Edit: 12 Nov 2018
# CS2302 Lab 4 B Project
# Instructors: Diego Aguirre and Saha, Manoj Pravakar
# Goal: Determine if a given anagram is a valid word in the english language using hash tables
from chaininghashtable import ChainingHashTable
# Load the dictionary of valid English words, one word per line.
# NOTE(review): the file handle is never closed; consider a with-statement.
f = open("words.txt")
word_list = f.readlines()
for i in range(len(word_list)):
    # Drops the LAST TWO characters of every line -- assumes "\r\n" endings.
    # On a "\n"-only file this also strips the word's final letter; confirm
    # the actual line endings of words.txt (str.strip() would be safer).
    word_list[i] = word_list[i][:-2] # -2 to get rid of the enter space
counter = 0  # not referenced anywhere in this chunk -- presumably leftover
# print_anagrams prints all the anagrams of a given word by generating every
# permutation of its letters and printing the ones found in the word list,
# e.g. print_anagrams("spot") prints: spot, stop, post, pots, opts, tops.
def print_anagrams(word, prefix="", words=None):
    """Print every permutation of `word`'s letters that is a valid word.

    word   -- letters still to be permuted
    prefix -- letters already fixed at the front of the candidate
    words  -- collection of valid words; defaults to the module-level
              word_list loaded from words.txt (backward-compatible addition)
    """
    if words is None:
        words = word_list
    if len(word) <= 1:
        candidate = prefix + word  # renamed from `str` to stop shadowing the builtin
        if candidate in words:
            print(candidate)
    else:
        for i in range(len(word)):
            cur = word[i: i + 1]
            before = word[0: i]   # letters before cur
            after = word[i + 1:]  # letters after cur
            # Bug fix: the recursive call previously passed three positional
            # arguments (with word/prefix swapped and word_list extra) to a
            # two-parameter function, raising TypeError for any word longer
            # than one letter.
            print_anagrams(before + after, prefix + cur, words)
# Main program to test HashTable classes.
# NOTE(review): ChainingHashTable is a project-local class; its insert/remove
# and __str__ behavior are assumed, not visible in this chunk.  The anagram
# helper above is never invoked here -- confirm that is intentional.
keys = [35, 0, 22, 94, 220, 110, 4]
chaining = ChainingHashTable()
for key in keys:
    chaining.insert(key)
# Show tables after inserts.
print ("Chaining: initial table:")
print (chaining)
print()
# Show tables after removing item 0
print ("=======================================")
chaining.remove(0)
print ("Chaining: after removing 0:")
print(chaining)
print()
| [
"scallejas2@miners.utep.edu"
] | scallejas2@miners.utep.edu |
89c4230d9e89f0f657b476e80fd43d8560fad2c0 | 89f0ac037468a738403325d0454d4cb9149f8f7f | /al4995_hw5_q7a.py | 586fe1aac7ca1c74fdb90f9af6a6f5b40b8c0c1d | [] | no_license | Jinsaeng/CS-Python | 85679d77b05b95ed660345f9d5fe896566e14685 | 0a15261ac2dd3e88527129d9170f405697f70be4 | refs/heads/master | 2020-05-01T18:30:39.515267 | 2019-03-25T16:45:33 | 2019-03-25T16:45:33 | 177,625,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | num = input("Enter number in the simplified Roman system: ");
m=0
d=0
c=0
l=0
x=0
v=0
one=0
for i in range(0,len(num)):
if num[i] == "M":
m += 1;
elif num[i] == "D":
d += 1;
elif num[i] == "C":
c += 1
elif num[i] == "L":
l += 1
elif num[i] == "X":
x += 1
elif num[i] == "V":
v += 1
elif num[i] == "I":
one += 1
total = (m*1000) + (d*500) + (c*100) + (l*50) + (x*10) + (v*5) + (one*1)
print(m,d,c,v,l,x,v,one)
print(num, "is", total)
| [
"noreply@github.com"
] | noreply@github.com |
3d61916d02e4d77417e3002685e698eb6b3f478c | c32230a3f7aff555a33cc9e5ad994b552823de63 | /Utils/train.py | 03ee464be761178b573343a8d1c2b3bf6caa1d00 | [] | no_license | ShreyasMalakarjunPatil/PHEW | 819bcb8ad47f73f137f92d7611fedc26b59f3981 | c4a0d40be8582a8ea51a4eb01338202f62277225 | refs/heads/main | 2023-07-04T12:51:04.841810 | 2021-07-23T15:44:02 | 2021-07-23T15:44:02 | 364,422,694 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,248 | py | import torch
import pickle as pkl
import copy
from Prune import mag_utils
from Prune import Utils
def test(network, loss, dataloader, dev):
network.eval()
total = 0
correct1 = 0
correct5 = 0
with torch.no_grad():
for idx, (data, target) in enumerate(dataloader):
data = data.to(dev)
target = target.to(dev)
output = network(data)
total += loss(output, target).item() * data.size(0)
_, pred = output.topk(5, dim=1)
correct = pred.eq(target.view(-1,1).expand_as(pred))
correct1 += correct[:,:1].sum().item()
correct5 += correct[:,:5].sum().item()
avg_loss = total / len(dataloader.dataset)
acc1 = 100.0 * correct1 / len(dataloader.dataset)
acc5 = 100.0 * correct5 / len(dataloader.dataset)
print('Top 1 Accuracy =', acc1)
print('Top 5 Accuracy =', acc5)
print('Average Loss =', avg_loss)
return avg_loss, acc1, acc5
def train(network, loss, optimizer, train_loader, test_loader, dev, epochs, scheduler):
    """Train `network` for `epochs` epochs, evaluating after each epoch.

    Returns (best_net, train_curve, test_loss, accuracy1, accuracy5) where
    best_net is a deep copy of the model at its highest top-1 accuracy.
    """
    train_curve = []
    accuracy1 = []
    accuracy5 = []
    test_loss = []
    acc_max = 0.0
    # Bug fix: `net` was only bound when an epoch improved on acc_max, so a
    # run whose accuracy never rose above 0.0 (or epochs == 0) raised
    # UnboundLocalError at the return.  Start from a snapshot of the model.
    net = copy.deepcopy(network)
    for epoch in range(epochs):
        network.train()
        train_loss = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data = data.to(dev)
            target = target.to(dev)
            optimizer.zero_grad()
            output = network(data)
            batch_loss = loss(output, target)
            # Weight batch-mean loss by batch size for a per-sample average.
            train_loss += batch_loss.item() * data.size(0)
            batch_loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), batch_loss.item()))
        train_curve.append(train_loss / len(train_loader.dataset))
        avg_loss, acc1, acc5 = test(network, loss, test_loader, dev)
        # Keep a deep copy of the best-performing model seen so far.
        if acc1 > acc_max:
            net = copy.deepcopy(network)
            acc_max = acc1
        accuracy1.append(acc1)
        accuracy5.append(acc5)
        test_loss.append(avg_loss)
        scheduler.step()
    return net, train_curve, test_loss, accuracy1, accuracy5
def train_mag(network, loss, optimizer, train_loader, test_loader, dev, epochs, scheduler):
    """Train `network` for `epochs` epochs and return the best model copy.

    Same loop as train(), but only the best network is returned (the per-epoch
    curves are collected locally and discarded).
    """
    train_curve = []
    accuracy1 = []
    accuracy5 = []
    test_loss = []
    # Bug fix: `acc_max` (and `net`) were used without ever being initialized
    # in this function, so the first epoch's `if acc1 > acc_max:` raised
    # NameError.  Mirror train()'s initialization.
    acc_max = 0.0
    net = copy.deepcopy(network)
    for epoch in range(epochs):
        network.train()
        train_loss = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data = data.to(dev)
            target = target.to(dev)
            optimizer.zero_grad()
            output = network(data)
            batch_loss = loss(output, target)
            train_loss += batch_loss.item() * data.size(0)
            batch_loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), batch_loss.item()))
        train_curve.append(train_loss / len(train_loader.dataset))
        avg_loss, acc1, acc5 = test(network, loss, test_loader, dev)
        if acc1 > acc_max:
            net = copy.deepcopy(network)
            acc_max = acc1
        accuracy1.append(acc1)
        accuracy5.append(acc5)
        test_loss.append(avg_loss)
        scheduler.step()
    return net
def train_mag2(network, loss, optimizer, train_loader, test_loader, dev, epochs, prune_perc):
    """Iterative magnitude pruning: each epoch re-prunes, then trains.

    Returns (network, weight_masks, bias_masks) from the final epoch.
    NOTE(review): mag_utils.mag_prune_masks and Utils.ratio are project-local;
    their exact semantics are assumed from their names.
    """
    train_curve = []
    accuracy1 = []
    accuracy5 = []
    test_loss = []
    for epoch in range(epochs):
        train_loss = 0
        net = copy.deepcopy(network)
        e = epoch + 1.0
        # Cubic sparsity schedule: p ramps from ~0 up to prune_perc at e == epochs.
        p = prune_perc - prune_perc * ( 1.0 - e/epochs )**3
        print(p)
        if epoch > 0:
            # Re-apply the previous epoch's masks to the copied weights before
            # computing new masks.  `weight_masks` here is the value left over
            # from the previous loop iteration (safe only because of the
            # epoch > 0 guard); `lkj` indexes masks over the non-1D parameters
            # (i.e. biases and other 1-D tensors are skipped).
            lkj = 0
            for ppp in net.parameters():
                if len(ppp.data.size()) != 1:
                    ppp.data = ppp.data * weight_masks[lkj]
                    lkj += 1
        weight_masks, bias_masks = mag_utils.mag_prune_masks(net, p, dev)
        network.set_masks(weight_masks, bias_masks)
        network.to(dev)
        network.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            data = data.to(dev)
            target = target.to(dev)
            optimizer.zero_grad()
            output = network(data)
            batch_loss = loss(output, target)
            train_loss += batch_loss.item() * data.size(0)
            batch_loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), batch_loss.item()))
        # Collected for inspection only -- these lists are not returned.
        train_curve.append(train_loss/len(train_loader.dataset))
        avg_loss, acc1, acc5 = test(network, loss, test_loader, dev)
        accuracy1.append(acc1)
        accuracy5.append(acc5)
        test_loss.append(avg_loss)
        Utils.ratio(net, weight_masks)
    return network,weight_masks, bias_masks
| [
"sm_patil@gatech.edu"
] | sm_patil@gatech.edu |
8882a3047b104ab1b3c17264e0c530a6d08c6907 | 9610621437f025aa97f99b67f0a5d8e13bbb715c | /com/vmware/vcenter/inventory_client.py | 4a5e81930923bd8759f59470aacabd3026107b08 | [
"MIT"
] | permissive | adammillerio/vsphere-automation-sdk-python | 2b3b730db7da99f1313c26dc738b82966ecea6ce | c07e1be98615201139b26c28db3aa584c4254b66 | refs/heads/master | 2022-11-20T03:09:59.895841 | 2020-07-17T19:32:37 | 2020-07-17T19:32:37 | 280,499,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,717 | py | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.vcenter.inventory.
#---------------------------------------------------------------------------
"""
The ``com.vmware.vcenter.inventory_client`` component provides methods and
classes for retrieving vCenter datastore and network information for a given
:class:`list` of identifiers.
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
# NOTE: auto-generated vAPI binding (see the file header's "DO NOT MODIFY");
# regenerate from the service definition instead of hand-editing.
class Datastore(VapiInterface):
    """
    The ``Datastore`` class provides methods to retrieve information about
    datastores.
    """
    _VAPI_SERVICE_ID = 'com.vmware.vcenter.inventory.datastore'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _DatastoreStub)
        self._VAPI_OPERATION_IDS = {}
    class Info(VapiStruct):
        """
        The ``Datastore.Info`` class contains information about a datastore.
        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """
        # NOTE: the `type` parameter below shadows the imported
        # vmware.vapi.bindings `type` module inside __init__.
        def __init__(self,
                     type=None,
                    ):
            """
            :type type: :class:`str`
            :param type: Type of the datastore.
                When clients pass a value of this class as a parameter, the
                attribute must be one of ``Datastore`` or ``StoragePod``. When
                methods return a value of this class as a return value, the
                attribute will be one of ``Datastore`` or ``StoragePod``.
            """
            self.type = type
            VapiStruct.__init__(self)
    # Here `type` is the vmware.vapi.bindings.type module (class-body scope).
    Info._set_binding_type(type.StructType(
        'com.vmware.vcenter.inventory.datastore.info', {
            'type': type.StringType(),
        },
        Info,
        False,
        None))
    def find(self,
             datastores,
            ):
        """
        Returns datastore information for the specified datastores. The key in
        the return value :class:`dict` is the datastore identifier and the
        value in the :class:`dict` is the datastore information.
        :type datastores: :class:`list` of :class:`str`
        :param datastores: Identifiers of the datastores for which information will be
            returned.
            The parameter must contain identifiers for the resource type:
            ``Datastore``.
        :rtype: :class:`dict` of :class:`str` and (:class:`Datastore.Info` or ``None``)
        :return: Datastore information for the specified datastores. The key in the
            return value :class:`dict` is the datastore identifier and the
            value in the :class:`dict` is the datastore information.
            The key in the return value :class:`dict` will be an identifier for
            the resource type: ``Datastore``.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if no datastore can be found for one or more of the datastore
            identifiers in ``datastores``
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have all of the privileges described as follows:
            * Method execution requires ``System.Read``.
            * The resource ``Datastore`` referenced by the parameter
              ``datastores`` requires ``System.Read``.
        """
        return self._invoke('find',
                            {
                            'datastores': datastores,
                            })
# NOTE: auto-generated vAPI binding; regenerate rather than hand-edit.
class Network(VapiInterface):
    """
    The ``Network`` class provides methods to retrieve information about
    vCenter Server networks.
    """
    _VAPI_SERVICE_ID = 'com.vmware.vcenter.inventory.network'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _NetworkStub)
        self._VAPI_OPERATION_IDS = {}
    class Info(VapiStruct):
        """
        The ``Network.Info`` class contains information about a vCenter Server
        network.
        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """
        # NOTE: the `type` parameter below shadows the imported
        # vmware.vapi.bindings `type` module inside __init__.
        def __init__(self,
                     type=None,
                    ):
            """
            :type type: :class:`str`
            :param type: Type of the vCenter Server network.
                When clients pass a value of this class as a parameter, the
                attribute must be one of ``Network``,
                ``DistributedVirtualPortgroup``, or ``OpaqueNetwork``. When methods
                return a value of this class as a return value, the attribute will
                be one of ``Network``, ``DistributedVirtualPortgroup``, or
                ``OpaqueNetwork``.
            """
            self.type = type
            VapiStruct.__init__(self)
    # Here `type` is the vmware.vapi.bindings.type module (class-body scope).
    Info._set_binding_type(type.StructType(
        'com.vmware.vcenter.inventory.network.info', {
            'type': type.StringType(),
        },
        Info,
        False,
        None))
    def find(self,
             networks,
            ):
        """
        Returns network information for the specified vCenter Server networks.
        The key in the return value :class:`dict` is the network identifier and
        the value in the :class:`dict` is the network information.
        :type networks: :class:`list` of :class:`str`
        :param networks: Identifiers of the vCenter Server networks for which information
            will be returned.
            The parameter must contain identifiers for the resource type:
            ``Network``.
        :rtype: :class:`dict` of :class:`str` and (:class:`Network.Info` or ``None``)
        :return: Network information for the specified vCenter Server networks. The
            key in the return value :class:`dict` is the network identifier and
            the value in the :class:`dict` is the network information.
            The key in the return value :class:`dict` will be an identifier for
            the resource type: ``Network``.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if no network can be found for one or more of the vCenter Server
            network identifiers in ``networks``
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have all of the privileges described as follows:
            * Method execution requires ``System.Read``.
            * The resource ``Network`` referenced by the parameter ``networks``
              requires ``System.Read``.
        """
        return self._invoke('find',
                            {
                            'networks': networks,
                            })
class _DatastoreStub(ApiInterfaceStub):
    """Auto-generated wire stub describing the Datastore service's `find` op."""
    def __init__(self, config):
        # properties for find operation
        find_input_type = type.StructType('operation-input', {
            'datastores': type.ListType(type.IdType()),
        })
        find_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        find_input_value_validator_list = [
        ]
        find_output_validator_list = [
        ]
        find_rest_metadata = None
        operations = {
            'find': {
                'input_type': find_input_type,
                'output_type': type.MapType(type.IdType(), type.OptionalType(type.ReferenceType(__name__, 'Datastore.Info'))),
                'errors': find_error_dict,
                'input_value_validator_list': find_input_value_validator_list,
                'output_validator_list': find_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'find': find_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.vcenter.inventory.datastore',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=True)
class _NetworkStub(ApiInterfaceStub):
    """Auto-generated wire stub describing the Network service's `find` op."""
    def __init__(self, config):
        # properties for find operation
        find_input_type = type.StructType('operation-input', {
            'networks': type.ListType(type.IdType()),
        })
        find_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        find_input_value_validator_list = [
        ]
        find_output_validator_list = [
        ]
        find_rest_metadata = None
        operations = {
            'find': {
                'input_type': find_input_type,
                'output_type': type.MapType(type.IdType(), type.OptionalType(type.ReferenceType(__name__, 'Network.Info'))),
                'errors': find_error_dict,
                'input_value_validator_list': find_input_value_validator_list,
                'output_validator_list': find_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'find': find_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.vcenter.inventory.network',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=True)
class StubFactory(StubFactoryBase):
    """Auto-generated factory exposing the two service stubs of this package."""
    _attrs = {
        'Datastore': Datastore,
        'Network': Network,
    }
| [
"miller@adammiller.io"
] | miller@adammiller.io |
edb5fcfcbb4bcff040dc2ab416705caba819a182 | 14864f5617bedcb5f1f756ca1da0df9585a4d605 | /apps/page2.py | 1bc71919a7e3d8a6cf8bac721e457bfc46539efa | [] | no_license | RyanMckenna95/Eurodash | ec60e4c0e47141bed41dc5f17c18ca8902c89caa | 71f9c634f29a4eccf478223410d520211c07a350 | refs/heads/master | 2022-12-09T21:48:19.233289 | 2020-09-20T12:20:06 | 2020-09-20T12:20:06 | 297,063,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,865 | py | import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
from apps.navbar import Navbar
from app import app
from apps.eurostatapi import popAgeGroup
import plotly.graph_objs as go
import plotly.express as px
# Shared navigation bar component for this page.
nav = Navbar()
# Dropdown options: one entry per distinct year in the Eurostat data.
# NOTE(review): popAgeGroup() is called here and again below for `features`
# and in the layout -- each call presumably re-fetches the dataset; consider
# caching the DataFrame once.
year_options = []
for time in popAgeGroup()['time'].unique():
    year_options.append({'label': str(time), 'value': time})
features = popAgeGroup().columns
# Page layout: heading, a text output div, axis/year dropdowns, and a graph.
body = html.Div([
    html.Div([
        html.H3('Population'),
        html.Div(id='app-2-display-value'),
        dcc.Link('Go to App 1', href='/apps/page1'),
        html.Div([
            # X-axis column selector.
            html.Div([
                dcc.Dropdown(id='xaxis',
                             options=[{'label': i, 'value': i} for i in features],
                             value='indic_de')
            ], style={'width': '48%', 'display': 'inline-block'}),
            # Y-axis column selector.
            html.Div([
                dcc.Dropdown(id='yaxis',
                             options=[{'label': i, 'value': i} for i in features],
                             value='values')
            ], style={'width': '48%', 'display': 'inline-block'}),
            # Year selector, defaulting to the earliest year in the data.
            html.Div([
                dcc.Dropdown(id='year-picker', options=year_options,
                             value=popAgeGroup()['time'].min())
            ])
        ]),
        html.Div([
            dcc.Graph(id='bar')
        ])
    ]),
], style={'padding': 5})
# Bug fix: this callback previously wrote its figure to component id
# 'scatter', but the layout's graph component is declared with id='bar',
# so the figure was never rendered.
@app.callback(Output('bar', 'figure'),
              [Input('year-picker', 'value'),
               Input('xaxis', 'value'),
               Input('yaxis', 'value')])
def update_graph(picked_year, xaxis_name, yaxis_name):
    """Rebuild the scatter figure for the chosen year and axis columns."""
    filtered_df = popAgeGroup()[popAgeGroup()['time'] == picked_year]
    traces = []
    # One scatter trace per country present in the selected year.
    for country_name in filtered_df['geo'].unique():
        df_by_country = filtered_df[filtered_df['geo'] == country_name]
        traces.append(go.Scatter(x=df_by_country[xaxis_name],
                                 y=df_by_country[yaxis_name],
                                 mode='markers',
                                 marker={'size': 15,
                                         'opacity': 0.5,
                                         'line': {'width': 0.5, 'color': 'white'}}
                                 ))
    return {'data': traces,
            'layout': go.Layout(title='population from groups',
                                xaxis={'title': xaxis_name},
                                yaxis={'title': yaxis_name},
                                hovermode='closest'),
            }
@app.callback(
    Output('app-2-display-value', 'children'),
    [Input('app-2-dropdown', 'value')])
def display_value(value):
    """Echo the selected dropdown value into the display div.

    NOTE(review): no component with id 'app-2-dropdown' exists in this file's
    layout, so this callback can never fire as written -- confirm the intended
    input component id.
    """
    return 'You have selected "{}"'.format(value)
def PageLayOut():
    """Assemble the full page: navigation bar stacked above the main body."""
    return html.Div([
        nav,
        body
    ])
| [
"mckenna1995@gmail.com"
] | mckenna1995@gmail.com |
d380208a8d395599666fb61452b253d0e419138d | 7dfbc92eac00d58b15cb6275a928907f31b5fd5f | /edx6001x/throwawayexercises/midterm5.py | 18bfd675f0d4f683cc076d81c0c869b9f687548a | [] | no_license | jfolsom/pytorials | 463125c1053da36235248e0a9f5e811d7c52d783 | 1758cf9cc536866439a0128adb1e512bda942573 | refs/heads/master | 2020-08-21T17:54:28.216605 | 2020-03-14T20:58:11 | 2020-03-14T20:58:11 | 216,212,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | def isunique(thisvalue, aDict):
count = 0
for value in aDict.values():
if value == thisvalue:
count += 1
if count == 1:
return True
else:
return False
def uniqueValues(aDict):
'''
aDict: a dictionary
for every unique integer item in aDict, return
the corresponding keys in order
'''
returnkeys = []
for k, v in aDict.items():
if type(v) == int and isunique(v, aDict):
returnkeys.append(k)
try:
returnkeys.sort()
except:
donothing = None
return returnkeys
# Ad-hoc demonstration: the mixed str/int keys make the final sort raise, so
# the unique-valued keys come back in dictionary insertion order.
aDict = {'g': 23125, 'b': 42, 'c': 223, 'frangible': -23, 'bu': 543, 5: 2, 'bubu': 543, 3: 1, 'carrot': 2}
listylist = uniqueValues(aDict)
print(listylist)
| [
"cercops@gmail.com"
] | cercops@gmail.com |
149ff803cf2e12675ab01b204bcf549300d50aea | 0e1a0329e1b96405d3ba8426fd4f935aa4d8b04b | /base/tests/test_create_free_client.py | 33cbc04a108ef50da4ffb8fda7a8f0709f6032c5 | [] | no_license | ugik/Blitz | 6e3623a4a03309e33dcc0b312800e8cadc26d28c | 740f65ecaab86567df31d6a0055867be193afc3d | refs/heads/master | 2021-05-03T20:15:20.516014 | 2015-03-11T12:33:34 | 2015-03-11T12:33:34 | 25,015,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,535 | py | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class TestCreateFreeClient(unittest.TestCase):
    """Selenium end-to-end test: sign up a free client against a local server.

    Python 2 code (note the `except Exc, e:` syntax) driving a live Firefox
    against http://127.0.0.1:8000; selectors are recorded from Selenium IDE
    and are brittle by nature -- restyle with care.
    """
    def setUp(self):
        """Launch Firefox and configure implicit waits and window size."""
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.driver.set_window_size(1300, 1000)
        self.base_url = "http://127.0.0.1:8000"
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_create_free_client(self):
        """Walk the signup flow: passwords, profile, skip headshot, finish."""
        driver = self.driver
        driver.get(self.base_url + "/client-signup?signup_key=TEST2")
        driver.find_element_by_name("password1").clear()
        driver.find_element_by_name("password1").send_keys("asdf")
        driver.find_element_by_name("password2").clear()
        driver.find_element_by_name("password2").send_keys("asdf")
        driver.find_element_by_xpath("//button").click()
        driver.find_element_by_link_text(u"Set up your profile →").click()
        driver.find_element_by_css_selector("label.radio").click()
        driver.find_element_by_name("age").clear()
        driver.find_element_by_name("age").send_keys("30")
        driver.find_element_by_xpath("//form[@id='setupForm']/div[3]/label[2]").click()
        # Warning: assertTextPresent may require manual changes
        # NOTE(review): the pattern ^[\s\S]*$ matches ANY text, so this
        # assertion is a placeholder that can never fail.
        self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*$")
        driver.find_element_by_name("weight").clear()
        driver.find_element_by_name("weight").send_keys("100")
        driver.find_element_by_name("height_feet").clear()
        driver.find_element_by_name("height_feet").send_keys("1")
        driver.find_element_by_name("height_inches").clear()
        driver.find_element_by_name("height_inches").send_keys("80")
        driver.find_element_by_css_selector("button.obtn.full-width").click()
        driver.find_element_by_id("skip-headshot").click()
        driver.find_element_by_link_text(u"Finish Signup →").click()
        # Warning: assertTextPresent may require manual changes
        self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*$")
        driver.get(self.base_url + "/")
        # The "Log Workout" steps below were deliberately disabled; kept for
        # reference while the flow is under rework.
        # driver.find_element_by_link_text("Log Workout").click()
        # import pdb; pdb.set_trace()
        # driver.find_element_by_xpath("//div[2]/input").clear()
        # driver.find_element_by_xpath("//div[2]/input").send_keys("90")
        # driver.find_element_by_xpath("//div[3]/div[2]/input").clear()
        # driver.find_element_by_xpath("//div[3]/div[2]/input").send_keys("95")
        # driver.find_element_by_xpath("//div[3]/div[3]/input").clear()
        # driver.find_element_by_xpath("//div[3]/div[3]/input").send_keys("7")
        # driver.find_element_by_xpath("//div[4]/div[2]/input").clear()
        # driver.find_element_by_xpath("//div[4]/div[2]/input").send_keys("100")
        # driver.find_element_by_xpath("//div[4]/div[3]/input").clear()
        # driver.find_element_by_xpath("//div[4]/div[3]/input").send_keys("8")
        # driver.find_element_by_css_selector("span.small").click()
        # time.sleep(1)
        # driver.find_element_by_link_text("Save These Sets").click()
        # driver.find_element_by_css_selector("button.obtn.log-workout-submit").click()
        # Warning: assertTextPresent may require manual changes
        # self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*$")
        driver.get(self.base_url + "/logout")
    def is_element_present(self, how, what):
        """Return True if the element located by (how, what) exists."""
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True
    def is_alert_present(self):
        """Return True if a JavaScript alert is currently open."""
        try: self.driver.switch_to_alert()
        except NoAlertPresentException, e: return False
        return True
    def close_alert_and_get_its_text(self):
        """Accept or dismiss the open alert and return its text."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    def tearDown(self):
        """Quit the browser and fail if any soft verification errors queued."""
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    unittest.main()
| [
"georgek@gmail.com"
] | georgek@gmail.com |
538993cfd91beecc42fc208fc0c19cbd03b0c868 | 088d9d76d696f26d0f0bbe8f9d1ee0acc8757356 | /dartsharp/transpiler.py | 44b3a37abfa4838e4ec0aeb3d3ef54a1788908a8 | [
"MIT"
] | permissive | yczhangsjtu/DartSharp | c1b15ee8b0db1556421c69bbc5bb70cad581b714 | f25171e1a9c7b462f4f6b931fe1785ebf7adf333 | refs/heads/master | 2021-01-04T18:56:05.180777 | 2020-06-09T00:06:35 | 2020-06-09T00:06:35 | 240,718,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,762 | py | from dart.function import FunctionLocator, ConstructorLocator, VariableDeclareLocator,\
FunctionalParameterItemElement
from dart.expression import DartListElement, FunctionInvocationElement, DoubleDotElement,\
is_capitalized, AssignmentElement
from dart.classes import ClassLocator, GetterLocator
from dart.globals import ImportLocator, PartOfLocator, TypedefLocator
from eregex.replacer import Replacer
from eregex.element import NumberElement, StringElement
import re
class DartSharpTranspiler(object):
	"""Transpile Dart source text into roughly equivalent C# source text.

	The transpiler walks the parse elements produced by the ``dart``
	package (classes, functions, getters/setters, typedefs, expressions)
	and rewrites them in place through ``Replacer`` objects.  Free
	functions and global variables are gathered into one static utility
	class, and any namespaces required by the pluggable ``engines`` are
	emitted as ``using`` directives in the front matter.
	"""
	def __init__(self, global_class_name="Utils", indent="	", double_to_float=True, engines=[]):
		# NOTE(review): the mutable default `engines=[]` is shared across
		# instances; safe here only because the list is never mutated.
		super(DartSharpTranspiler, self).__init__()
		self.global_class_name = global_class_name
		self.import_locator = ImportLocator(indentation="")
		self.part_of_locator = PartOfLocator(indentation="")
		self.class_locator = ClassLocator(inner_indentation=indent)
		self.function_locator = FunctionLocator(inner_indentation=indent)
		self.variable_declare_locator = VariableDeclareLocator(indentation="")
		self.typedef_locator = TypedefLocator(indentation="")
		self.double_to_float = double_to_float
		self.indent = indent
		self.engines = engines
		self.reset()
	def reset(self):
		"""Clear all per-translation state so the instance can be reused."""
		self.global_functions = {}
		self.global_variables = {}
		self.class_attributes = {}
		self.needed_namespaces = {}
		self.need_initialize = {}
		self.setters = {}
		self.error_messages = []
	def using_namespace(self, namespace):
		"""Record that the generated C# needs a `using` for `namespace`."""
		if namespace is not None:
			self.needed_namespaces[namespace] = True
	def get_namespaces(self):
		"""Render the collected namespaces as `using X;` lines."""
		return "\n".join(map(lambda x: "using %s;" % x, self.needed_namespaces.keys()))
	def get_static_util_class(self):
		"""Wrap collected global functions/variables in one static class."""
		parts = []
		parts.extend(self.global_functions.values())
		parts.extend(self.global_variables.values())
		return "static class %s {\n%s\n}" % (self.global_class_name, self.indented("\n\n".join(parts)))
	def front_matter(self):
		"""Build the `using` directives and utility class prepended to output."""
		parts = []
		if len(self.needed_namespaces) > 0:
			parts.append(self.get_namespaces())
		if len(self.global_functions) + len(self.global_variables) > 0:
			parts.append(self.get_static_util_class())
		if len(parts) > 0:
			return "\n\n".join(parts)
		return ""
	def get_class_attribute_type(self, class_name, attribute_name):
		"""Look up a previously recorded attribute type; None if unknown."""
		if class_name in self.class_attributes:
			if attribute_name in self.class_attributes[class_name]:
				return self.class_attributes[class_name][attribute_name]
	def add_need_initialize(self, class_name, attribute_name):
		"""Mark an attribute that a constructor must assign (`this.x = x`)."""
		if class_name not in self.need_initialize:
			self.need_initialize[class_name] = []
		if attribute_name not in self.need_initialize[class_name]:
			self.need_initialize[class_name].append(attribute_name)
	def add_setter(self, class_name, setter):
		"""Remember a Dart setter so it can be merged with its getter later."""
		if class_name not in self.setters:
			self.setters[class_name] = {}
		self.setters[class_name][setter.name.content()] = setter
	def get_setter(self, class_name, name):
		"""Return the recorded setter for class/name, or None."""
		if class_name not in self.setters:
			return None
		if name not in self.setters[class_name]:
			return None
		return self.setters[class_name][name]
	def transpile_dart_code(self, code):
		"""Transpile a whole Dart source file and return the C# text."""
		self.reset()
		replacer = Replacer(code)
		# Imports are removed; engines may translate them into namespaces.
		imports = self.import_locator.locate_all(code)
		for imp in imports:
			replacer.update((imp.start, imp.end, ""))
			for engine in self.engines:
				namespace = engine.get_namespace(imp.target.inside_content())
				if namespace is not None:
					self.using_namespace(namespace)
		# `part of` directives have no C# equivalent and are dropped.
		part_ofs = self.part_of_locator.locate_all(code)
		for part_of in part_ofs:
			replacer.update((part_of.start, part_of.end, ""))
		# Globals are hoisted into the static utility class as
		# `public static` members and erased from their original position.
		global_variables = self.variable_declare_locator.locate_all(code)
		for global_variable in global_variables:
			gv = self.transpile_attribute(global_variable).strip()
			if not gv.startswith("public"):
				gv = "public static %s" % gv
			else:
				gv = "public static%s" % gv[6:]
			self.global_variables[global_variable.name.content()] = gv
			replacer.update((global_variable.start, global_variable.end, ""))
		global_functions = self.function_locator.locate_all(code)
		for func in global_functions:
			gf = self.transpile_function(func).strip()
			if not gf.startswith("public"):
				gf = "public static %s" % gf
			else:
				gf = "public static%s" % gf[6:]
			self.global_functions[func.name.content()] = gf
			replacer.update((func.start, func.end, ""))
		class_blocks = self.class_locator.locate_all(code)
		for class_block in class_blocks:
			replacer.update((class_block.start, class_block.end, self.transpile_class(class_block)))
		typedefs = self.typedef_locator.locate_all(code)
		for typedef in typedefs:
			replacer.update((typedef.start, typedef.end, self.transpile_typedef(typedef)))
		replacer.update((0, 0, self.front_matter()))
		self.error_messages.extend(replacer.error_messages)
		return replacer.digest()
	def transpile_class(self, class_block):
		"""Transpile one Dart class (header, members, accessors, ctors)."""
		replacer = Replacer(class_block.text, class_block.start, class_block.end)
		replacer.update((class_block.header.start, class_block.header.end, self.transpile_class_header(class_block.header)))
		self.class_attributes[class_block.name.name.content()] = {}
		if class_block.attributes is not None:
			for attribute in class_block.attributes:
				replacer.update((attribute.start, attribute.end, self.transpile_attribute(attribute, class_block.name)))
		if class_block.functions is not None:
			for func in class_block.functions:
				replacer.update((func.start, func.end, self.transpile_function(func)))
		# Setters are registered first so each getter can absorb its pair
		# into a single C# property; leftovers are emitted standalone.
		if class_block.setters is not None:
			for setter in class_block.setters:
				self.add_setter(class_block.name.content(), setter)
		if class_block.getters is not None:
			for getter in class_block.getters:
				replacer.update((getter.start, getter.end, self.transpile_getter(getter, class_block.name.content())))
				setter = self.get_setter(class_block.name.content(), getter.name.content())
				if setter is not None:
					del self.setters[class_block.name.content()][setter.name.content()]
					replacer.update((setter.start, setter.end, ""))
		if class_block.name.content() in self.setters:
			for name in self.setters[class_block.name.content()]:
				setter = self.setters[class_block.name.content()][name]
				replacer.update((setter.start, setter.end, self.transpile_setter(setter)))
		if class_block.constructors is not None:
			for constructor in class_block.constructors:
				replacer.update((constructor.start, constructor.end, self.transpile_constructor(constructor)))
		self.error_messages.extend(replacer.error_messages)
		return replacer.digest()
	def transpile_class_header(self, header):
		"""Render a C# class header; extends/with/implements all become `:`."""
		replacer = Replacer(header.text, header.start, header.end)
		words = []
		# Dart's `_name` privacy convention maps to omitting `public`.
		if not header.name.content().startswith("_"):
			words.append("public")
		if header.is_abstract:
			words.append("abstract")
		words.append("class")
		words.append(header.name.content())
		extensions = []
		if header.extends is not None:
			extensions.extend(map(lambda element: element.content(), header.extends.elements))
		if header.with_mixes is not None:
			extensions.extend(map(lambda element: element.content(), header.with_mixes.elements))
		if header.implements is not None:
			extensions.extend(map(lambda element: element.content(), header.implements.elements))
		if len(extensions) > 0:
			words.append(":")
			words.extend(extensions)
		return " ".join(words)
	def transpile_typedef(self, typedef):
		"""Map a Dart typedef onto a C# `delegate` declaration."""
		items = []
		if not typedef.name.content().startswith("_"):
			items.append("public")
		items.append("delegate")
		header = self.transpile_funcheader(typedef.header, override=False)
		# The access modifier already precedes `delegate`; strip the
		# duplicate one produced by transpile_funcheader.
		if header.startswith("public "):
			header = header[7:]
		items.append(header)
		return "%s;" % " ".join(items)
	def transpile_attribute(self, attribute, class_name=None):
		"""Transpile a field declaration; records its type for constructors."""
		items = []
		if not attribute.name.content().startswith("_"):
			items.append("public")
		if attribute.modifier is not None:
			if attribute.modifier.content() == "final" or\
				attribute.modifier.content() == "const":
				items.append("readonly")
		if attribute.typename is not None:
			typename = self.transpile_typename(attribute.typename)
		else:
			# No declared type: try to infer one from the initializer.
			typename = self.deduce_type(attribute.default_value)
			if typename is None:
				self.error_messages.append("Cannot deduce type of %s." % attribute.default_value.content())
		if typename is not None:
			items.append(typename)
			if class_name is not None:
				self.class_attributes[class_name.name.content()][attribute.name.content()] = typename
		items.append(attribute.name.content())
		if attribute.default_value is not None:
			items.append("=")
			items.append(self.transpile_expression(attribute.default_value))
		return "%s;" % " ".join(items)
	def transpile_variable_declare(self, attribute, func_name=None):
		"""Transpile a local variable declaration inside a function body."""
		items = []
		if attribute.modifier is not None:
			if attribute.modifier.content() == "var":
				items.append("var")
		if attribute.typename is not None:
			items.append(self.transpile_typename(attribute.typename))
		elif len(items) == 0 or items[-1] != "var":
			# Untyped declarations (e.g. `final x = ...`) fall back to `var`.
			items.append("var")
		items.append(attribute.name.content())
		if attribute.default_value is not None:
			items.append("=")
			items.append(self.transpile_expression(attribute.default_value))
		return "%s;" % " ".join(items)
	def transpile_function(self, func):
		"""Transpile a function: header, statements, locals, loops, body."""
		replacer = Replacer(func.text, func.start, func.end)
		replacer.update((func.header.start, func.header.end, self.transpile_funcheader(func.header, func.override)))
		if func.modifiers is not None:
			replacer.update((func.modifiers.start, func.modifiers.end, ""))
		if func.statements is not None:
			for statement in func.statements:
				replacer.update((statement.start, statement.end, self.transpile_statement(statement)))
		if func.variable_declares is not None:
			for variable_declare in func.variable_declares:
				replacer.update((variable_declare.start, variable_declare.end, self.transpile_variable_declare(variable_declare, func.name)))
		if func.for_in_blocks is not None:
			for for_in_block in func.for_in_blocks:
				replacer.update((for_in_block.start, for_in_block.end, self.transpile_for_in_block(for_in_block)))
		if func.expression_body is not None:
			replacer.update((func.expression_body.start, func.expression_body.end, self.transpile_expression(func.expression_body)))
		self.error_messages.extend(replacer.error_messages)
		return replacer.digest()
	def transpile_statement(self, statement):
		"""Transpile one statement; `a ??= b` expands to `a = a ?? b`."""
		element = statement.element
		if isinstance(element, FunctionInvocationElement):
			return "%s;" % self.transpile_function_invocation(element)
		if isinstance(element, AssignmentElement):
			if element.sign.content() == "??=":
				left = element.left.content()
				right = self.transpile_expression(element.right)
				return "%s = %s ?? %s;" % (left, left, right)
			elif element.sign.content() == "=":
				left = element.left.content()
				right = self.transpile_expression(element.right)
				return "%s = %s;" % (left, right)
		return statement.content()
	def transpile_getter(self, getter, class_name):
		"""Render a Dart getter as a C# property; merge its setter if any."""
		header_parts = []
		if not getter.name.content().startswith("_"):
			header_parts.append("public")
		if getter.override:
			header_parts.append("override")
		header_parts.append(self.transpile_typename(getter.typename))
		header_parts.append(getter.name.content())
		header = " ".join(header_parts)
		# Arrow getters (`get x => expr;`) become `return expr;` bodies.
		if getter.is_arrow:
			body = "return %s;" % getter.inside_content()
		else:
			body = getter.inside_content()
		setter = self.get_setter(class_name, getter.name.content())
		if setter is None:
			return "\n%s%s {\n%s%sget {\n%s%s%s\n%s%s}\n%s}" % (
				getter.indentation,
				header,
				getter.indentation,
				self.indent,
				getter.indentation,
				self.indent,
				self.indented(body),
				getter.indentation,
				self.indent,
				getter.indentation
			)
		return "\n%s%s {\n%s%sget {\n%s%s%s\n%s%s}\n\n%s%s%s\n%s}" % (
			getter.indentation,
			header,
			getter.indentation,
			self.indent,
			getter.indentation,
			self.indent,
			self.indented(body),
			getter.indentation,
			self.indent,
			getter.indentation,
			self.indent,
			self.transpile_setter(setter, only_inside=True),
			getter.indentation
		)
	def transpile_setter(self, setter, only_inside=False):
		"""Render a Dart setter as a C# `set` accessor (or full property).

		only_inside=True emits just the `set { ... }` body so the caller
		can embed it inside a property produced by transpile_getter.
		"""
		header_parts = []
		if not setter.name.content().startswith("_"):
			header_parts.append("public")
		if setter.override:
			header_parts.append("override")
		header_parts.append(self.transpile_typename(setter.typename))
		header_parts.append(setter.name.content())
		header = " ".join(header_parts)
		# The setter's parameter is renamed to C#'s implicit `value`.
		if setter.is_arrow:
			body = "%s;" % re.sub(r"\b%s\b" % setter.variable.content(), "value", setter.inside_content())
		else:
			body = re.sub(r"\b%s\b" % setter.variable.content(), "value", setter.inside_content())
		if only_inside:
			return "set {\n%s%s%s\n%s%s}" % (
				setter.indentation,
				self.indent,
				self.indented(body),
				setter.indentation,
				self.indent,
			)
		return "\n%s%s {\n%s%sset {\n%s%s%s\n%s%s}\n%s}" % (
			setter.indentation,
			header,
			setter.indentation,
			self.indent,
			setter.indentation,
			self.indent,
			self.indented(body),
			setter.indentation,
			self.indent,
			setter.indentation
		)
	def transpile_constructor(self, constructor):
		"""Transpile a constructor; `this.x` params become explicit assigns."""
		replacer = Replacer(constructor.text, constructor.start, constructor.end)
		replacer.update((constructor.header.start, constructor.header.end, self.transpile_constructor_header(constructor.header)))
		body_start, body_end = constructor.header.end, constructor.end
		body_indentation = "%s%s" % (constructor.indentation, self.indent)
		body_parts = [" {"]
		if constructor.name.content() in self.need_initialize:
			attributes = self.need_initialize[constructor.name.content()]
			body_parts.append("%s%s" % (body_indentation, ("\n%s" % body_indentation).join(
				map(lambda a: "this.%s = %s;" % (a, a), attributes)
			)))
		if constructor.initializer_content() is not None:
			body_parts.append("%s%s;" % (body_indentation, constructor.initializer_content().strip()))
		if constructor.braced_content() is not None:
			body_parts.append(constructor.braced_content())
		body_parts.append("%s}" % constructor.indentation)
		replacer.update((body_start, body_end, "\n".join(body_parts)))
		self.error_messages.extend(replacer.error_messages)
		return replacer.digest()
	def transpile_funcheader(self, header, override):
		"""Render a function header: modifiers, return type, name, params."""
		replacer = Replacer(header.text, header.start, header.end)
		name_parts = []
		if not header.name.content().startswith("_"):
			name_parts.append("public")
		if header.static is not None:
			name_parts.append("static")
		if override:
			name_parts.append("override")
		if self.double_to_float and self.transpile_typename(header.typename) == "double":
			name_parts.append("float")
		else:
			name_parts.append(self.transpile_typename(header.typename))
		name_parts.append(self.transpile_func_name(header.name))
		return "%s(%s)" % (" ".join(name_parts), self.transpile_parameter_list(header.parameter_list))
	def transpile_func_name(self, name):
		"""Map a (possibly dotted) Dart name onto its C# equivalent."""
		if not isinstance(name, str):
			name = name.content()
		# Hoisted globals are now reached through the static utility class.
		if name in self.global_functions or name in self.global_variables:
			return "%s.%s" % (self.global_class_name, name)
		if name == "super":
			return "base"
		if name.startswith("super."):
			return "base%s" % name[5:]
		for engine in self.engines:
			namespace = engine.require_namespace(name)
			if namespace is not None:
				self.using_namespace(namespace)
			mapped_word = engine.map_word(name)
			if mapped_word is not None:
				return mapped_word
		# Dotted names are mapped segment by segment.
		name_parts = name.split('.')
		if len(name_parts) > 1:
			return ".".join([self.transpile_func_name(name_part) for name_part in name_parts])
		return name
	def transpile_constructor_header(self, header):
		"""Render a constructor header (no return type)."""
		replacer = Replacer(header.text, header.start, header.end)
		name_parts = []
		if not header.name.content().startswith("_"):
			name_parts.append("public")
		name_parts.append(header.name.content())
		return "%s(%s)" % (" ".join(name_parts), self.transpile_parameter_list(header.parameter_list, header.name))
	def transpile_parameter_list(self, parameter_list, class_name=None):
		"""Join positioned and named parameter groups into one C# list."""
		lists = []
		if parameter_list.positioned is not None:
			lists.append(self.transpile_positioned_parameter_list(parameter_list.positioned, class_name))
		if parameter_list.named is not None:
			lists.append(self.transpile_named_parameter_list(parameter_list.named, class_name))
		result = ", ".join(lists)
		# Drop a dangling trailing comma left by the Dart source.
		if result.strip().endswith(","):
			index = result.rfind(",")
			result = result[:index] + result[index+1:]
		return result
	def transpile_positioned_parameter_list(self, parameter_list, class_name=None):
		"""Transpile positional parameters in place."""
		replacer = Replacer(parameter_list.text, parameter_list.start, parameter_list.end)
		items = parameter_list.elements
		for i in range(len(items)):
			if items[i].default_value is None:
				replacer.update((items[i].start, items[i].end, self.transpile_parameter_item(items[i], class_name=class_name)))
		self.error_messages.extend(replacer.error_messages)
		return replacer.digest()
	def transpile_named_parameter_list(self, parameter_list, class_name=None):
		"""Transpile named parameters; slicing strips Dart's `{`...`}`."""
		replacer = Replacer(parameter_list.text, parameter_list.start, parameter_list.end)
		items = parameter_list.elements
		for i in range(len(items)):
			replacer.update((items[i].start, items[i].end, self.transpile_parameter_item(items[i], add_default_value=True, class_name=class_name)))
		self.error_messages.extend(replacer.error_messages)
		return replacer.digest()[1:-1]
	def transpile_parameter_item(self, parameter_item, add_default_value=False, class_name=None):
		"""Transpile one parameter.

		Function-typed parameters become Action/Function delegates;
		`this.x` constructor parameters look up the recorded field type and
		are flagged for explicit assignment in the constructor body.
		"""
		items = []
		default_value = None
		if parameter_item.default_value is not None:
			default_value = self.transpile_expression(parameter_item.default_value)
		typename = None
		if isinstance(parameter_item.element, FunctionalParameterItemElement):
			self.using_namespace("System")
			if parameter_item.typename.content() == "void":
				if parameter_item.element.function_header.parameter_list is None:
					typename = "Action"
				else:
					types = []
					if parameter_item.positioned is not None:
						for i in range(len(parameter_item.positioned)):
							types.append(self.transpile_typename(parameter_item.positioned[i].typename))
					if parameter_item.named is not None:
						for i in range(len(parameter_item.named)):
							types.append(self.transpile_typename(parameter_item.named[i].typename))
					typename = "Action<%s>" % ", ".join(types)
			else:
				types = []
				if parameter_item.positioned is not None:
					for i in range(len(parameter_item.element.function_header.parameter_list.positioned)):
						types.append(self.transpile_typename(parameter_item.element.parameter_list.positioned[i].typename))
				if parameter_item.named is not None:
					for i in range(len(parameter_item.named)):
						types.append(self.transpile_typename(parameter_item.named[i].typename))
				types.append(self.transpile_typename(parameter_item.typename))
				typename = "Function<%s>" % ", ".join(types)
		elif parameter_item.typename is not None:
			typename = self.transpile_typename(parameter_item.typename)
			if self.double_to_float and typename == "double":
				typename = "float"
				if default_value is not None and isinstance(parameter_item.default_value, NumberElement):
					default_value = default_value + "f"
		else:
			if class_name is not None:
				typename = self.get_class_attribute_type(class_name.content(), parameter_item.name.content())
				self.add_need_initialize(class_name.content(), parameter_item.name.content())
		# Named parameters get a C#-style default matching their type.
		if default_value is None and add_default_value:
			default_value = "null"
			if typename == "int":
				default_value = "0"
			elif typename == "float":
				default_value = "0.0f"
			elif typename == "double":
				default_value = "0.0"
			elif typename == "bool":
				default_value = "false"
		if parameter_item.required:
			items.append("/* @required */")
		if typename is not None:
			items.append(typename)
		items.append(parameter_item.name.content())
		if default_value is not None:
			items.append("=")
			items.append(default_value)
		return " ".join(items)
	def transpile_for_in_block(self, for_in_block):
		"""Rewrite `for (x in y)` to C# `foreach`, recursing into subloops."""
		replacer = Replacer(for_in_block.text, for_in_block.start, for_in_block.end)
		replacer.update((for_in_block.forword.start, for_in_block.forword.end, "foreach"))
		if for_in_block.typename.content() == "final":
			replacer.update((for_in_block.typename.start, for_in_block.typename.end, "var"))
		if for_in_block.for_in_blocks is not None:
			for subblock in for_in_block.for_in_blocks:
				replacer.update((subblock.start, subblock.end, self.transpile_for_in_block(subblock)))
		self.error_messages.extend(replacer.error_messages)
		return replacer.digest()
	def transpile_typename(self, typename):
		"""Map a Dart type (str or element) to C#, recursing into generics."""
		if isinstance(typename, str):
			for engine in self.engines:
				namespace = engine.require_namespace(typename)
				if namespace is not None:
					self.using_namespace(namespace)
				mapped_word = engine.map_word(typename)
				if mapped_word is not None:
					return mapped_word
			return typename
		if typename.content() == "double" and self.double_to_float:
			return "float"
		if typename.content() == "String":
			return "string"
		replacer = Replacer(typename.text, typename.start, typename.end)
		for engine in self.engines:
			namespace = engine.require_namespace(typename.name.content())
			if namespace is not None:
				self.using_namespace(namespace)
			mapped_word = engine.map_word(typename.name.content())
			if mapped_word is not None:
				replacer.update((typename.name.start, typename.name.end, mapped_word))
				break
		if typename.template_types is not None:
			for i in range(len(typename.template_types)):
				replacer.update((typename.template_types[i].start,
					typename.template_types[i].end,
					self.transpile_typename(typename.template_types[i])))
		self.error_messages.extend(replacer.error_messages)
		return replacer.digest()
	def transpile_expression(self, value):
		"""Transpile an expression: lists, calls, strings, `..`, numbers."""
		expression = value.expression
		if isinstance(expression, DartListElement):
			self.using_namespace("System.Collections.Generic")
			replacer = Replacer(expression.text, expression.start, expression.end)
			if expression.typename is not None:
				replacer.update((expression.bracket.start, expression.bracket.end, "new List<%s>" % self.transpile_typename(expression.typename)))
			else:
				replacer.update((expression.start, expression.start, "new List"))
			if expression.elements is None:
				replacer.update((expression.open_bracket.start, expression.close_bracket.end, "()"))
			else:
				replacer.update((expression.open_bracket.start, expression.open_bracket.end, "{"))
				replacer.update((expression.close_bracket.start, expression.close_bracket.end, "}"))
				for i in range(len(expression.elements)):
					replacer.update((expression.elements[i].start, expression.elements[i].end, self.transpile_expression(expression.elements[i])))
			self.error_messages.extend(replacer.error_messages)
			return replacer.digest()
		if isinstance(expression, FunctionInvocationElement):
			return self.transpile_function_invocation(expression)
		if isinstance(expression, StringElement):
			# Raw strings become verbatim `@"..."`; `${...}` interpolation
			# becomes C# `$"{...}"`.
			if expression.is_raw:
				return "@\"%s\"" % expression.inside_content()
			else:
				if expression.inside_content().find("${") >= 0:
					return "$\"%s\"" % expression.inside_content().replace("${", "{")
				return "\"%s\"" % expression.inside_content()
		if isinstance(expression, DoubleDotElement):
			# Cascade (`..`) has no C# equivalent: the arms are rewritten
			# onto a `tmp` receiver and commented out for manual fix-up.
			replacer = Replacer(expression.text, expression.start, expression.end)
			replacer.update((expression.expression.start, expression.expression.end, self.transpile_expression(expression.expression)))
			for i in range(len(expression.arms)):
				arm = expression.arms[i]
				double_dots = arm[0]
				replacer.update((double_dots.start, double_dots.start+1, "tmp"))
			replacer.update((expression.arms.start, expression.arms.start, "/* "))
			replacer.update((expression.arms.end, expression.arms.end, " */"))
			self.error_messages.extend(replacer.error_messages)
			return replacer.digest()
		if isinstance(expression, NumberElement):
			if expression.frac_part is not None and self.double_to_float:
				return "%sf" % expression.content()
			return expression.content()
	def transpile_function_invocation(self, function_invocation):
		"""Transpile a call; capitalized names are treated as constructors."""
		expression = function_invocation
		replacer = Replacer(expression.text, expression.start, expression.end)
		replacer.update((expression.name.start, expression.name.end, self.transpile_func_name(expression.name)))
		if expression.arguments is not None:
			for i in range(len(expression.arguments)):
				replacer.update((expression.arguments[i].value.start, expression.arguments[i].value.end, self.transpile_expression(expression.arguments[i].value)))
		if expression.modifier is not None:
			if expression.modifier.content() == "const":
				if is_capitalized(expression.pure_name()):
					replacer.update((expression.modifier.start, expression.modifier.end, "new"))
				else:
					replacer.update((expression.modifier.start, expression.modifier.end, ""))
		else:
			if is_capitalized(expression.pure_name()):
				replacer.update((expression.name.start, expression.name.start, "new "))
		self.error_messages.extend(replacer.error_messages)
		return replacer.digest()
	def deduce_type(self, value):
		"""Best-effort C# type inference for an initializer expression."""
		expression = value.expression
		if isinstance(expression, NumberElement):
			if expression.frac_part is not None:
				if self.double_to_float:
					return "float"
				else:
					return "double"
			else:
				return "int"
		if isinstance(expression, StringElement):
			return "string"
		if isinstance(expression, DartListElement):
			if expression.typename is not None:
				return "List<%s>" % self.transpile_typename(expression.typename)
			return "List"
		if isinstance(expression, FunctionInvocationElement):
			# A capitalized call is assumed to be a constructor invocation.
			if is_capitalized(expression.pure_name()):
				return self.transpile_typename(expression.name.content())
			pcn = expression.possible_class_name()
			if pcn is not None:
				return self.transpile_typename(pcn)
			return None
		if expression.content() == "true" or expression.content() == "false":
			return "bool"
		if expression.content().endswith(".length"):
			return "int"
		return None
	def indented(self, text, steps=1):
		"""Indent every line of `text` by `steps` levels of self.indent."""
		return "%s%s" % (self.indent, text.replace("\n", "\n%s" % self.indent * steps))
| [
"yczhangsjtu@gmail.com"
] | yczhangsjtu@gmail.com |
afd83845d02b2e0bbd10ada0166a5fdaecea47d0 | a4520ed992a5e7ded73a2f5121f131d520c85624 | /Python/Binary_Tree_Maximum_Path_Sum.py | e7952d89a59628c493f44525842f25d242b4c644 | [] | no_license | kumailn/Algorithms | 2bfa11e8e31c3cff8c8c42f86d02739f8c172505 | e6be0a0cbf0caf13a723a9edfd2df33c2234235f | refs/heads/master | 2022-11-30T07:02:46.672681 | 2022-11-06T23:08:09 | 2022-11-06T23:08:09 | 122,286,889 | 64 | 17 | null | 2019-10-14T06:47:09 | 2018-02-21T03:14:18 | Python | UTF-8 | Python | false | false | 1,248 | py | def maxPathSum(root: TreeNode) -> int:
maxSum = float('-inf')
def maxChildrenSum(root):
nonlocal maxSum
# If we land on a null node we can return 0 safely because
# we know that it will not contribute to the sum of the root
if not root: return 0
# Now we need to compute the sums of the left and right children
# if the sums of either children is less than 0 we know it'll drag down the total
# sum of our path, so we can just omit that child and take 0 instead of it's sum
left = max(maxChildrenSum(root.left), 0)
right = max(maxChildrenSum(root.right), 0)
# If the path we just computed (ie the path with the current node as root) happens to
# have a bigger sum than the biggest one we've seen, it becomes our new biggest
maxSum = max(maxSum, left + root.val + right)
# We return the sum of our current node plus the biggest gain on left or the right
# we don't take both the left and right sums because we are looking for an end to
# end path and so cannot traverse down both subtrees
return root.val + max(left, right)
maxChildrenSum(root)
return maxSum | [
"kumailmn@gmail.com"
] | kumailmn@gmail.com |
29dc5b8d43fb5302a1441222a19a7d9099bcf929 | 8ce0fd5e5c5b858fa24e388f2114885160421c03 | /python/netuse/net_use.py | 1fd08a9bcb6cb202b36976078d0b840d73d473a4 | [] | no_license | kong-ling/scripts | 266e9975ae0156d6fdddf43b8f1d7ee20469b388 | 3c41c49646358d46871c8fd8ebe1ba52bdea046c | refs/heads/master | 2021-01-10T08:29:34.772634 | 2020-01-03T09:04:57 | 2020-01-03T09:04:57 | 43,275,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | import os
import sys
import subprocess
# Run the Windows `net use` command and print each mapped-drive line.
# Fix: the original Popen was never waited on or closed, leaking the
# pipe handles and (on POSIX) the child process entry.  The context
# manager closes stdout/stdin and reaps the child on exit.
with subprocess.Popen('net use',
                      stdout=subprocess.PIPE,
                      stdin=subprocess.PIPE) as p:
    print(type(p))
    for drv in p.stdout.readlines():
        # NOTE(review): readlines() yields bytes on Python 3, so this
        # prints b'...' reprs — confirm whether text=True is wanted.
        print(drv.strip())
| [
"kong.ling@outlook.com"
] | kong.ling@outlook.com |
54eef6b92d0dea189cce79be2163407619b9dcff | f85cc3fb482f1b71e7a749e1bcdbe90ba78fd059 | /swap_every_two_linked_list.py | 382cd105924360af622ba95190ca1d4012b07495 | [] | no_license | shan-mathi/InterviewBit | c94e091f728b9d18d55e86130756824a3637a744 | 6688e4ff54d56cf75297bb72ce67926b40e45127 | refs/heads/main | 2023-06-29T10:43:29.712472 | 2021-08-05T19:06:53 | 2021-08-05T19:06:53 | 364,321,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param A : head node of linked list
    # @return the head node in the linked list
    def swapPairs(self, A):
        """Swap every two adjacent nodes and return the new head."""
        if A is None or A.next is None:
            return A
        # A dummy node in front of the head lets the first pair be
        # re-linked exactly like every later pair.
        dummy = ListNode(-1)
        dummy.next = A
        prev = dummy
        while prev.next is not None and prev.next.next is not None:
            first, second = prev.next, prev.next.next
            # Re-wire to: prev -> second -> first -> rest
            first.next = second.next
            second.next = first
            prev.next = second
            # `first` is now the tail of the swapped pair.
            prev = first
        return dummy.next
| [
"noreply@github.com"
] | noreply@github.com |
b582204cc823c8bbb76f24bede5a93b806555eff | cf0e827be05b2f2ec8afe8363996f7763af85414 | /AlienInvasion/settings.py | e4d729ed2d012bbb0d01d44b143ef54c0367af31 | [
"Apache-2.0"
] | permissive | imtoby/LearnPythonRecord | 3f2757e0c70d793ad70c1ef2a8335b56004e2b55 | 81cfb0940da8af23afa67796b61973cc6ead31f9 | refs/heads/master | 2021-05-09T14:36:19.216559 | 2018-02-28T03:09:27 | 2018-02-28T03:09:27 | 119,069,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py | # -*- coding: utf-8 -*-
# Created by: ZhaoDongshuang
# Created on: 18-1-29
class Settings:
    """Store all settings for the Alien Invasion game."""

    def __init__(self):
        """Initialize the game's static settings."""
        # Screen settings
        self.screen_width = 1200
        self.screen_height = 800
        self.background_color = (230, 230, 230)
        # Ship settings
        self.ship_limit = 3
        # Bullet settings
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = (60, 60, 60)
        self.bullet_count_allowed = 3
        # Alien settings
        self.fleet_drop_speed = 10
        # How quickly the game speeds up
        self.speedup_scale = 1.1
        # How quickly the alien point values increase
        self.score_scale = 1.5
        # Fix: the dynamic values (speed factors, fleet_direction,
        # alien_points) were assigned twice — once here and again in
        # initialize_dynamic_settings().  They are now set only by the
        # call below, so there is a single source of truth.
        self.initialize_dynamic_settings()

    def initialize_dynamic_settings(self):
        """Initialize the settings that change throughout the game."""
        self.ship_speed_factor = 1.5
        self.bullet_speed_factor = 3
        self.alien_speed_factor = 1
        # fleet_direction of 1 means moving right; -1 means moving left
        self.fleet_direction = 1
        # Points awarded for shooting one alien
        self.alien_points = 50

    def increase_speed(self):
        """Increase speed settings and alien point values."""
        self.ship_speed_factor *= self.speedup_scale
        self.bullet_speed_factor *= self.speedup_scale
        self.alien_speed_factor *= self.speedup_scale
        self.alien_points = int(self.alien_points * self.score_scale)
| [
"zhaodongshuang@syberos.com"
] | zhaodongshuang@syberos.com |
ec1776e8742cedfafef2050a4f6d43076bd74404 | 7b3b859dd633eb2240d987b37e487ea8388e2f8d | /core/main.py | a9b413f579a53f5bd8f1dcc138a83420fc140c2b | [] | no_license | yszhuang/assetPricing2 | 96956638f6c26e4e7d33e0abffe5c5c14460000a | 10af01a66bcd13cb516920e9cb1b46d8cfa6b598 | refs/heads/master | 2022-01-13T02:00:09.070100 | 2018-09-01T02:28:21 | 2018-09-01T02:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,113 | py | # -*-coding: utf-8 -*-
# Python 3.6
# Author:Zhang Haitao
# Email:13163385579@163.com
# TIME:2018-04-28 14:58
# NAME:assetPricing2-main.py
import os
import shutil
import time
import pandas as pd
import numpy as np
from config import WINSORIZE_LIMITS
from data.dataApi import Database, Benchmark
from tool import monitor, summary_statistics, cal_corr, cal_persistence, \
cal_breakPoints, count_groups, my_average, \
assign_port_id, famaMacBeth, apply_col_by_col, newey_west, correlation_mixed
from zht.utils.mathu import winsorize, get_outer_frame
DATA=Database(sample_control=True) #TODO: use controlled data
# In the fm function,independent variables are winsorized,so we do not need to filter the raw data.
def combine_with_datalagged(indicators, sample_control=True):
    """Combine one-month-lagged characteristics with current returns.

    The characteristics (plus 'weight') are shifted forward one month
    within each stock ('sid'), so portfolios formed at month t+1 are
    sorted only on information known at month t.
    """
    lagged = Database(sample_control).by_indicators(indicators + ['weight'])
    current = Database(sample_control).by_indicators(['stockEretM'])
    # The shift must happen inside each 'sid' group; shifting the raw
    # frame would leak data across stocks.  The shift costs the first
    # month of every stock's sample.
    # TODO: fold the shift into the data pipeline so that month is kept.
    shifted = lagged.groupby('sid').shift(1)
    return pd.concat([shifted, current], axis=1)
@apply_col_by_col
def adjust_with_riskModel(x, riskmodel=None):
    """Regress a return series on a benchmark model and report the alpha.

    riskmodel may be None (raw average excess return) or one of
    ['capm', 'ff3', 'ffc', 'ff5', 'hxz4'].  Newey-West standard errors
    with 5 lags are used for the intercept throughout.
    """
    lags = 5
    valid_models = {'capm': 'capmM',
                    'ff3': 'ff3M',
                    'ffc': 'ffcM',
                    'ff5': 'ff5M',
                    'hxz4': 'hxz4M'}
    df = pd.DataFrame(x)
    df.columns = ['y']
    if riskmodel not in valid_models:
        # No benchmark: the intercept of a constant-only regression is
        # simply the mean excess return.
        nw = newey_west('y ~ 1', df, lags)
        return nw['Intercept'].rename(index={'coef': 'excess return',
                                             't': 't excess return'})
    # The indicators were shifted forward already, so the index of df is
    # already time-aligned with the benchmark factors — no extra shift.
    bench = Benchmark().by_benchmark(riskmodel)
    df = pd.concat([df, bench], axis=1)
    formula = 'y ~ ' + ' + '.join(bench.columns.tolist())
    nw = newey_west(formula, df, lags)
    return nw['Intercept'].rename(index={'coef': 'alpha_' + riskmodel,
                                         't': 't_alpha_' + riskmodel})
def risk_adjust(panel, riskmodels=None):
    """Stack the raw excess return and every risk-model alpha for *panel*.

    :param panel: portfolio return frame/series to adjust
    :param riskmodels: iterable of model names; defaults to the full ladder
        [None, 'capm', 'ff3', 'ffc', 'ff5', 'hxz4'] where None means unadjusted
    :return: row-wise concatenation of adjust_with_riskModel results,
        one group of rows per model
    """
    models = [None, 'capm', 'ff3', 'ffc', 'ff5', 'hxz4'] if riskmodels is None else riskmodels
    adjusted = []
    for model in models:
        adjusted.append(adjust_with_riskModel(panel, model))
    return pd.concat(adjusted, axis=0)
class OneFactor:
    """Univariate (single-sort) factor analysis pipeline.

    For one factor family this produces summary statistics, correlations,
    persistence, decile breakpoints/counts, portfolio sorts and Fama-MacBeth
    regressions.  Every table is cached in ``self.results`` and finally
    written to one Excel workbook by ``save_results``.
    """
    # number of portfolios per sort (deciles)
    q=10
    def __init__(self, factor,path):
        self.factor=factor
        self.path=path #project path
        # DATA.info presumably maps a factor family to its list of indicator
        # column names — TODO confirm against dataApi.
        self.indicators=DATA.info[factor]
        self.df=DATA.by_indicators(self.indicators)
        # portfolio labels, e.g. size1 .. size10
        self.groupnames=[self.factor+str(i) for i in range(1,self.q+1)]
        self._build_environment()
        self.results={}
    def _build_environment(self):
        """Recreate the output directory from scratch (destructive)."""
        if os.path.exists(self.path):
            shutil.rmtree(self.path)
            # brief pause so the rmtree is flushed before makedirs (Windows quirk)
            time.sleep(0.1)
        os.makedirs(self.path)
    @monitor
    def summary(self):
        """Time-series averages of cross-sectional summary statistics, per indicator."""
        series=[]
        for indicator in self.indicators:
            s=summary_statistics(self.df[indicator].unstack())
            series.append(s.mean())
        # pd.concat(series,keys=self.indicators,axis=1).to_csv(os.path.join(self.path,'summary.csv'))
        self.results['summary']=pd.concat(series,keys=self.indicators,axis=1)
    @monitor
    def correlation(self,indicators=None):
        """Correlation matrix of the factor's indicators (or a custom list)."""
        if indicators is None:
            comb=self.df
        else:
            comb=DATA.by_indicators(indicators)
        corr=correlation_mixed(comb)
        # corr.to_csv(os.path.join(self.path, 'corr.csv'))
        self.results['corr']=corr
    @monitor
    def persistence(self):
        """Persistence of each indicator at horizons from 1 to 120 months."""
        # TODO: Table II of Asness, Clifford S., Andrea Frazzini, and Lasse Heje Pedersen. “Quality Minus Junk.” SSRN Scholarly Paper. Rochester, NY: Social Science Research Network, June 5, 2017. https://papers.ssrn.com/abstract=2312432.
        perdf = pd.DataFrame()
        for indicator in self.indicators:
            per = cal_persistence(self.df[indicator].unstack(),
                                  offsets=[1, 3, 6, 12, 24, 36, 48, 60, 120])
            perdf[indicator] = per
        # perdf.to_csv(os.path.join(self.path, 'persistence.csv'))
        self.results['persistence']=perdf
    @monitor
    def breakPoints_and_countGroups(self):
        """Decile breakpoints and per-group stock counts for each indicator."""
        dfs_bp = []
        dfs_count = []
        for indicator in self.indicators:
            d = self.df[indicator].unstack()
            # there is no samples for some months due to festival
            # TODO: how to set the thresh?
            d = d.dropna(axis=0,thresh=self.q * 10)
            bps = cal_breakPoints(d, self.q)
            dfs_bp.append(bps)
            count = count_groups(d, self.q)
            dfs_count.append(count)
        result_bp = pd.concat(dfs_bp, keys=self.indicators, axis=0)
        result_count = pd.concat(dfs_count, keys=self.indicators, axis=0)
        # result_bp.to_csv(os.path.join(self.path, 'breakPoints.csv'))
        # result_count.to_csv(os.path.join(self.path, 'count.csv'))
        self.results['breakPoints']=result_bp
        self.results['count']=result_count
    # TODO:In fact,the count is not exactly the number of stocks to calculate the weighted return
    # TODO:as some stocks will be deleted due to the missing of weights.
    def _get_port_data(self, indicator):
        """Assign each (t, stock) observation to one of q quantile portfolios."""
        groupid = DATA.by_indicators([indicator])
        #trick: pd.qcut will just ignore NaNs,but if all the values are NaNs it will throw an error
        try:
            groupid['g'] = groupid.groupby('t', group_keys=False).apply(
                lambda df: pd.qcut(df[indicator], self.q,
                                   labels=[indicator + str(i) for i in range(1, self.q + 1)],
                                   duplicates='raise')#trick: drop the duplicated bins
            )
        except ValueError:#trick:qcut with non unique values https://stackoverflow.com/questions/20158597/how-to-qcut-with-non-unique-bin-edges
            # fall back to rank(method='first') to break ties deterministically
            groupid['g'] = groupid.groupby('t', group_keys=False).apply(
                lambda df: pd.qcut(df[indicator].rank(method='first'), self.q,
                                   labels=[indicator + str(i) for i in range(1, self.q + 1)]) # trick: drop the duplicated bins
            )
        return groupid
    @monitor
    def portfolio_characteristics(self, sortedIndicator, otherIndicators):
        '''
        as table 12.3 panel A: average characteristics of portfolios sorted
        on one indicator
        :param sortedIndicator: indicator used to form the portfolios
        :param otherIndicators: characteristics to average within each portfolio
        :return: None (table stored in self.results)
        '''
        groupid = self._get_port_data(sortedIndicator)
        comb = DATA.by_indicators(otherIndicators)
        comb = pd.concat([groupid, comb], axis=1)
        # average within (month, portfolio), then across months
        characteristics_avg = comb.groupby(['t', 'g']).mean().groupby('g').mean()
        # characteristics_avg.to_csv(os.path.join(self.path, 'portfolio characteristics.csv'))
        self.results['portfolio characteristics']=characteristics_avg
    # TODO: upgrade this function
    def _get_panel_stk_avg(self, comb, indicator, gcol):
        """Return (equal-weighted, value-weighted) portfolio return panels."""
        panel_stk_eavg = comb.groupby(['t', gcol])['stockEretM'].mean() #equal weighted
        if self.factor == 'size':
            '''
            when the factor is size,we also use the indicator (sort variable) as weight
            Refer to page 159.
            '''
            panel_stk_wavg = comb.groupby(['t', gcol]).apply(
                lambda df: my_average(df, 'stockEretM', wname=indicator)
            )
        else:
            '''
            the index denotes t+1,and the weight is from time t,
            since we have shift weight forward in dataset.
            '''
            # def func(df):
            #     return my_average(df,'stockEretM',wname='weight')
            #
            # panel_stk_wavg=comb.groupby(['t',gcol]).apply(func)
            panel_stk_wavg = comb.groupby(['t', gcol]).apply(
                lambda df: my_average(df, 'stockEretM', wname='weight')
            )
        return panel_stk_eavg, panel_stk_wavg
    @monitor
    def portfolio_analysis(self):
        '''
        table 8.4: univariate portfolio sorts with raw and risk-adjusted
        spread returns, both equal- and value-weighted
        :return: None (tables stored in self.results)
        '''
        comb=combine_with_datalagged(self.indicators)
        # all_indicators = list(set(self.indicators + ['weight', 'stockEretM']))
        # comb = DATA.by_indicators(all_indicators)
        result_eavg = []
        result_wavg = []
        for indicator in self.indicators:
            gcol = 'g_%s' % indicator
            # comb[gcol]=comb.groupby('t').apply(
            #     lambda df:grouping(df[indicator].reset_index(level='t'),self.q,labels=self.groupnames))
            comb[gcol] = comb.groupby('t', group_keys=False).apply(
                lambda df: assign_port_id(df[indicator], self.q, self.groupnames))
            # TODO:Add an alternative sorting method,that is,updating yearly as page 9 of Chen et al., “On the Predictability of Chinese Stock Returns.”
            panel_stk_eavg, panel_stk_wavg = self._get_panel_stk_avg(comb, indicator, gcol)
            for panel_stk in [panel_stk_eavg, panel_stk_wavg]:
                panel = panel_stk.unstack(level=[gcol])
                panel.columns = panel.columns.astype(str)
                # high-minus-low spread column, e.g. "size10_size1"
                panel['_'.join([self.groupnames[-1], self.groupnames[0]])] = panel[self.groupnames[-1]] - panel[
                    self.groupnames[0]]
                panel['avg'] = panel.mean(axis=1)
                # TODO: use the risk models declared above
                # part A: average value of the sort variable within each portfolio
                a_data = comb.groupby(['t', gcol])[indicator].mean()
                a_data = a_data.unstack()
                a_data.columns = a_data.columns.astype(str)
                a_data.index = a_data.index.astype(str)
                a_data['_'.join([self.groupnames[-1], self.groupnames[0]])] = a_data[self.groupnames[-1]] - a_data[
                    self.groupnames[0]]
                a_data['avg'] = a_data.mean(axis=1)
                a = a_data.mean()
                a.name = 'avg'
                a = a.to_frame().T
                riskAdjusted = risk_adjust(panel)
                # TODO:something must be wrong with size or portfolio_analysse.
                if panel_stk is panel_stk_eavg:
                    result_eavg.append(pd.concat([a, riskAdjusted], axis=0))
                else:
                    result_wavg.append(pd.concat([a, riskAdjusted], axis=0))
        table_e = pd.concat(result_eavg, axis=0, keys=self.indicators)
        table_w = pd.concat(result_wavg, axis=0, keys=self.indicators)
        # reorder the columns: portfolios first, then avg/spread/extra columns
        initialOrder = table_e.columns.tolist()
        h=self.groupnames+['avg']
        newOrder=h+[col for col in initialOrder if col not in h]
        # newOrder = self.groupnames + [col for col in initialOrder if col not in self.groupnames]
        table_e = table_e.reindex(columns=newOrder)
        table_w = table_w.reindex(columns=newOrder)
        #mark the t values to facilitate the following analysis
        # NOTE(review): the first assignment appends a column, so iloc[:,-2]
        # on the next line addresses the SAME original last column as
        # iloc[:,-1] above — confirm both flags are meant to read one t column.
        table_e['significant_positive']=table_e.iloc[:,-1].map(lambda v:1 if v>2 else np.nan)
        table_e['significant_negative']=table_e.iloc[:,-2].map(lambda v:-1 if v<-2 else np.nan)
        table_w['significant_positive']=table_w.iloc[:,-1].map(lambda v:1 if v>2 else np.nan)
        table_w['significant_negative']=table_w.iloc[:,-2].map(lambda v:-1 if v<-2 else np.nan)
        # table_e.to_csv(os.path.join(self.path, 'univariate portfolio analysis-equal weighted.csv'))
        # table_w.to_csv(os.path.join(self.path, 'univariate portfolio analysis-value weighted.csv'))
        self.results['uni_port_analysis_eq']=table_e
        self.results['uni_port_analysis_vw']=table_w
    def _one_indicator(self, indicator):
        """k-month-ahead sort tables for one indicator (helper for table 11.4)."""
        ns = range(1, 13)
        all_indicators=[indicator,'weight','stockEretM']
        comb = DATA.by_indicators(all_indicators)
        comb = comb.dropna()
        try:
            comb['g'] = comb.groupby('t', group_keys=False).apply(
                lambda df: pd.qcut(df[indicator], self.q,
                                   labels=[indicator + str(i) for i in range(1, self.q + 1)],
                                   duplicates='raise')
            )
        except ValueError:#trick:qcut with non unique values https://stackoverflow.com/questions/20158597/how-to-qcut-with-non-unique-bin-edges
            comb['g'] = comb.groupby('t', group_keys=False).apply(
                lambda df: pd.qcut(df[indicator].rank(method='first'), self.q,
                                   labels=[indicator + str(i) for i in range(1, self.q + 1)])
            )
        def _one_indicator_one_weight_type(group_ts, indicator):
            # high-minus-low spread per month, then risk-adjust the spread series
            def _big_minus_small(s, ind):
                time = s.index.get_level_values('t')[0]
                return s[(time, ind + str(self.q))] - s[(time, ind + '1')]
            spread_data = group_ts.groupby('t').apply(lambda series: _big_minus_small(series, indicator))
            s = risk_adjust(spread_data)
            return s
        # NOTE(review): comb only holds [indicator,'weight','stockEretM'], so
        # comb['eret'] below should raise KeyError — this method looks broken
        # (it is not called from run()); verify before use.
        eret = comb['eret'].unstack()
        s_es = []
        s_ws = []
        eret_names = []
        for n in ns:
            eret_name = 'eret_ahead%s' % (n + 1)
            comb[eret_name] = eret.shift(-n).stack()
            group_eavg_ts = comb.groupby(['t', 'g'])[eret_name].mean()
            group_wavg_ts=comb.groupby(['t','g']).apply(
                lambda df:my_average(df,eret_name,'weight')
            )
            # group_wavg_ts = comb.groupby(['t', 'g']).apply(
            #     lambda df: np.average(df[eret_name], weights=df['weight']))#fixme: what if there is nan values?
            #TODO: If we are analyzing size,the weights should be the indicator
            #we are analyzing,rather than weight
            s_e = _one_indicator_one_weight_type(group_eavg_ts, indicator)
            s_w = _one_indicator_one_weight_type(group_wavg_ts, indicator)
            s_es.append(s_e)
            s_ws.append(s_w)
            eret_names.append(eret_name)
        eq_table = pd.concat(s_es, axis=1, keys=eret_names)
        vw_table = pd.concat(s_ws, axis=1, keys=eret_names)
        return eq_table, vw_table
    @monitor
    def portfolio_anlayse_with_k_month_ahead_returns(self):
        '''table 11.4: portfolio sorts against returns 2..13 months ahead'''
        eq_tables = []
        vw_tables = []
        for indicator in self.indicators:
            eq_table, vw_table = self._one_indicator(indicator)
            eq_tables.append(eq_table)
            vw_tables.append(vw_table)
            print(indicator)
        eq = pd.concat(eq_tables, axis=0, keys=self.indicators)
        vw = pd.concat(vw_tables, axis=0, keys=self.indicators)
        # eq.to_csv(os.path.join(self.path, 'univariate portfolio analysis_k-month-ahead-returns-eq.csv'))
        # vw.to_csv(os.path.join(self.path, 'univariate portfolio analysis_k-month-ahead-returns-vw.csv'))
        self.results['uni_port_analysis_ahead_k_eq']=eq
        self.results['uni_port_analysis_ahead_k_vw']=vw
    @monitor
    def fm(self):
        """Fama-MacBeth regression of returns on each indicator separately."""
        comb=combine_with_datalagged(self.indicators)
        data = []
        ps = []
        for indicator in self.indicators:
            subdf = comb[[indicator, 'stockEretM']]
            subdf = subdf.dropna()
            # NOTE(review): this maps the characteristic to 'y' and the return
            # to 'x', yet the formula below is 'y ~ x' — the regression
            # direction looks reversed relative to the winsorize comment;
            # verify against famaMacBeth's expectations.
            subdf.columns = ['y', 'x']
            '''
            (page 141)The independent variable is winsorized at a given level on a monthly basis.
            (page 90)The independent variables are usually winsorized to ensure that a small number of extreme
            independent variable values do not have a large effect on the results of the regression.
            In some cases the dependent variable is also winsorized.When the dependent variable is a
            security return or excess return,this variable is usually not winsorized.In most other
            cases,it is common to winsorized the dependent variable.
            '''
            # WINSORIZE_LIMITS is defined elsewhere in this module/config.
            subdf['x'] = subdf.groupby('t')['x'].apply(lambda s: winsorize(s, limits=WINSORIZE_LIMITS))
            subdf = subdf.reset_index()
            formula = 'y ~ x'
            r, adj_r2, n, p= famaMacBeth(formula, 't', subdf, lags=5)
            # TODO: why intercept tvalue is so large?
            # TODO: why some fm regression do not have a adj_r2 ?
            data.append([r.loc['x', 'coef'], r.loc['x', 'tvalue'],
                         r.loc['Intercept', 'coef'], r.loc['Intercept', 'tvalue'],
                         adj_r2, n])
            ps.append(p['x'])
            print(indicator)
        result = pd.DataFrame(data, index=self.indicators,
                              columns=['slope', 't', 'Intercept', 'Intercept_t', 'adj_r2', 'n']).T
        # result.to_csv(os.path.join(self.path, 'fama macbeth regression analysis.csv'))
        parameters = pd.concat(ps, axis=1, keys=self.indicators)
        # parameters.to_csv(os.path.join(self.path, 'fama macbeth regression parameters in first stage.csv'))
        self.results['fm']=result
        self.results['fm_first_stage']=parameters
    @monitor
    def parameter_ts_fig(self):
        '''
        Plot the first-stage Fama-MacBeth slope series per indicator with a
        zero reference line (cf. 田利辉 and 王冠英, “我国股票定价五因素模型.”).
        :return: None (figures are saved as PNGs under self.path)
        '''
        #TODO: Why not plot rolling parameters?
        parameters=self.results['fm_first_stage']
        # parameters = pd.read_csv(os.path.join(self.path, 'fama macbeth regression parameters in first stage.csv'),
        #                          index_col=[0], parse_dates=True)
        parameters['zero'] = 0.0
        for indicator in self.indicators:
            s=parameters[indicator].dropna()
            # share of months with a positive slope, shown in the title
            positive_ratio=(s>0).sum()/s.shape[0]
            fig = parameters[[indicator, 'zero']].plot(title='positive ratio: {:.3f}'.format(positive_ratio)).get_figure()
            fig.savefig(os.path.join(self.path, 'fm parameter ts fig-{}.png'.format(indicator)))
    def save_results(self):
        """Write every cached table to one Excel workbook, one sheet per key."""
        # NOTE(review): the ExcelWriter is never saved/closed here — depending
        # on the pandas version the file may never be flushed to disk; verify.
        excelWriter=pd.ExcelWriter(os.path.join(self.path,self.factor+'.xlsx'))
        for k in self.results:
            self.results[k].to_excel(excelWriter,k)
    def run(self):
        """Execute the full univariate pipeline and persist the results."""
        self.summary()
        self.correlation()
        self.persistence()
        self.breakPoints_and_countGroups()
        # self.portfolio_characteristics()
        self.portfolio_analysis()
        self.fm()
        self.parameter_ts_fig()
        self.save_results()
    def __call__(self):
        self.run()
class Bivariate:
    """Bivariate (double-sort) portfolio analysis and multivariate
    Fama-MacBeth regressions for a pair of indicators.

    Tables are cached in ``self.results`` and written to one Excel workbook
    by ``save_results``.
    """
    # number of portfolios per sort dimension (quintiles)
    q=5
    def __init__(self,indicator1,indicator2,proj_path):
        self.indicator1=indicator1
        self.indicator2=indicator2
        self.path=proj_path
        self._build_environment()
        self.results={}
    def _build_environment(self):
        """Recreate the output directory from scratch (destructive)."""
        if os.path.exists(self.path):
            shutil.rmtree(self.path)
        os.makedirs(self.path)
    def _get_independent_data(self):
        """Independent double sort: g1 and g2 are assigned from the full
        cross-section each month, without conditioning on each other."""
        # TODO: add the method of ratios such as [0.3,0.7]
        comb=combine_with_datalagged([self.indicator1,self.indicator2])
        comb=comb.dropna()
        comb['g1']=comb.groupby('t',group_keys=False).apply(
            lambda df:assign_port_id(df[self.indicator1], self.q,
                                     [self.indicator1 + str(i) for i in range(1, self.q + 1)]))
        comb['g2']=comb.groupby('t',group_keys=False).apply(
            lambda df:assign_port_id(df[self.indicator2], self.q,
                                     [self.indicator2 + str(i) for i in range(1,self.q + 1)]))
        return comb
    def _get_dependent_data(self,control,target):
        '''
        Dependent double sort: g1 buckets on *control* first, then g2 buckets
        on *target* WITHIN each (month, g1) cell.
        :param control: controlling (first-sort) indicator name
        :param target: target (second-sort) indicator name
        :return: comb frame with 'g1' and 'g2' portfolio labels attached
        '''
        comb=combine_with_datalagged([control,target])
        comb=comb.dropna()
        comb['g1']=comb.groupby('t',group_keys=False).apply(
            lambda df:assign_port_id(df[control], self.q,
                                     [control + str(i) for i in range(1,self.q + 1)]))
        comb['g2']=comb.groupby(['t','g1'],group_keys=False).apply(
            lambda df:assign_port_id(df[target], self.q,
                                     [target + str(i) for i in range(1,self.q + 1)]))
        return comb
    def _get_eret(self,comb):
        """Equal- and value-weighted excess returns per (g1, g2, t) cell."""
        group_eavg_ts = comb.groupby(['g1', 'g2', 't'])['stockEretM'].mean()
        group_wavg_ts = comb.groupby(['g1', 'g2', 't']).apply(
            lambda df:my_average(df,'stockEretM','weight'))
        return group_eavg_ts,group_wavg_ts
    def _cal_portfolio_return(self, group_ts, controlGroup='g1', targetGroup='g2'):
        # Table 9.6: average returns per cell plus high-minus-low spreads on
        # the target dimension, raw and CAPM-adjusted.
        # [:-1] strips the trailing portfolio number from a label such as
        # 'size3' to recover the indicator name.
        controlIndicator = group_ts.index.get_level_values(controlGroup)[0][:-1]
        targetName = group_ts.index.get_level_values(targetGroup)[0][:-1]
        # A: time-series of cell averages, one column per control portfolio
        a_data = group_ts.groupby(['t', controlGroup, targetGroup]).mean().unstack(level=[controlGroup])
        a_data.columns = a_data.columns.astype(str)
        # A1: average across control portfolios ("controlling for" column)
        a1_data = group_ts.groupby(['t', controlGroup, targetGroup]).mean().groupby(['t', targetGroup]).mean()
        a_data[controlIndicator + ' avg'] = a1_data
        _a = a_data.groupby(targetGroup).mean()
        def _get_spread(df):
            # target-q minus target-1 within one month
            time = df.index.get_level_values('t')[0]
            return df.loc[(time, targetName + str(self.q))] - df.loc[(time, targetName + '1')]
        # B: monthly spread series, then adjusted
        b_data = a_data.groupby('t').apply(_get_spread)
        _b1=adjust_with_riskModel(b_data)
        _b2=adjust_with_riskModel(b_data,'capm')
        _b1.index = [targetName + str(self.q) + '-1', targetName + str(self.q) + '-1 t']
        _b2.index = [targetName + str(self.q) + '-1 capm alpha', targetName + str(self.q) + '-1 capm alpha t']
        _a.index = _a.index.astype(str)
        _a.columns = _a.columns.astype(str)
        return pd.concat([_a, _b1, _b2], axis=0)
    def _average_control_variable_portfolios(self, group_ts, controlGroup='g2', targetGroup='g1'):
        # table 10.5 panel B
        # NOTE(review): the defaults swap g1/g2 relative to
        # _cal_portfolio_return, so with data from _get_dependent_data the
        # spread here is taken over g1 (the FIRST/control sort) — confirm
        # this is intended.
        targetIndicator = group_ts.index.get_level_values(targetGroup)[0][:-1]  # targetGroup
        # controlIndicator = group_ts.index.get_level_values(controlGroup)[0][:-1]  # controlGroup
        a1_data = group_ts.groupby(['t', targetGroup, controlGroup]).mean().groupby(['t', targetGroup]).mean()
        stk = a1_data.unstack()
        stk.index = stk.index.astype(str)
        stk.columns = stk.columns.astype(str)
        stk[targetIndicator + str(self.q) + '-1'] = stk[targetIndicator + str(self.q)] - stk[targetIndicator + '1']
        _a=adjust_with_riskModel(stk)
        _b=adjust_with_riskModel(stk,'capm')
        table = pd.concat([_a, _b], axis=0)
        return table
    def _independent_portfolio_analysis(self, group_ts):
        # table 9.8: run the sort both ways and merge into one symmetric table
        table1 = self._cal_portfolio_return(group_ts, controlGroup='g1', targetGroup='g2')
        table2 = self._cal_portfolio_return(group_ts, controlGroup='g2', targetGroup='g1').T
        table1, table2 = get_outer_frame([table1, table2])
        table = table1.fillna(table2)
        return table
    @monitor
    def independent_portfolio_analysis(self):
        """Independent double-sort tables, equal- and value-weighted."""
        comb = self._get_independent_data()
        group_eavg_ts, group_wavg_ts = self._get_eret(comb)
        table_eavg = self._independent_portfolio_analysis(group_eavg_ts)
        table_wavg = self._independent_portfolio_analysis(group_wavg_ts)
        self.results['ind_eq_{}_{}'.format(self.indicator1,self.indicator2)]=table_eavg
        self.results['ind_vw_{}_{}'.format(self.indicator1,self.indicator2)]=table_wavg
        # (csv export disabled; all tables are written by save_results)
    @monitor
    def dependent_portfolio_analysis(self):
        """Dependent double-sort tables, run with each indicator as control."""
        def _f(control,target):
            comb = self._get_dependent_data(control,target)
            group_eavg_ts, group_wavg_ts = self._get_eret(comb)
            table_eavg = self._cal_portfolio_return(group_eavg_ts)
            table_wavg = self._cal_portfolio_return(group_wavg_ts)
            self.results['de_eq_{}_{}'.format(control,target)]=table_eavg
            self.results['de_vw_{}_{}'.format(control,target)]=table_wavg
            # (csv export disabled; all tables are written by save_results)
        _f(self.indicator1,self.indicator2)
        _f(self.indicator2,self.indicator1)
    @monitor
    def dependent_portfolio_analysis_twin(self):
        '''table 10.5 panel B: averaged-over-control dependent sorts'''
        def _f(control,target):
            comb = self._get_dependent_data(control,target)
            group_eavg_ts, group_wavg_ts = self._get_eret(comb)
            table_eavg = self._average_control_variable_portfolios(group_eavg_ts)
            table_wavg = self._average_control_variable_portfolios(group_wavg_ts)
            self.results['de1_eq_{}_{}'.format(control,target)]=table_eavg
            self.results['de1_vw_{}_{}'.format(control,target)]=table_wavg
            # (csv export disabled; all tables are written by save_results)
        _f(self.indicator1, self.indicator2)
        _f(self.indicator2, self.indicator1)
    @staticmethod
    def famaMacbeth_reg(indeVars):
        #TODO: upgrade fm in Univariate by calling this function
        '''
        Multivariate Fama-MacBeth regression of stockEretM on *indeVars*.
        (page 141)The independent variable is winsorized at a given level on a monthly basis.
        (page 90)The independent variables are usually winsorized to ensure that a small number of extreme
        independent variable values do not have a large effect on the results of the regression.
        In some cases the dependent variable is also winsorized.When the dependent variable is a
        security return or excess return,this variable is usually not winsorized.In most other
        cases,it is common to winsorized the dependent variable.
        '''
        comb=combine_with_datalagged(indeVars)
        comb=comb.dropna()
        #trick: the data is already winsorized before calling dataApi
        # winsorize
        # comb[indeVars]=comb.groupby('t')[indeVars].apply(
        #     lambda x:winsorize(x,limits=WINSORIZE_LIMITS,axis=0))
        # rename columns to formula-safe identifiers (name0, name1, ...)
        namedict={inde:'name{}'.format(i) for i,inde in enumerate(indeVars)}
        comb=comb.rename(columns=namedict)
        formula = 'stockEretM ~ ' + ' + '.join(namedict.values())
        # TODO:lags?
        r, adj_r2, n, firstStage_params = famaMacBeth(formula, 't', comb, lags=5)
        # map the safe names back to the original indicator names
        r = r.rename(index={v:k for k,v in namedict.items()})
        # save the first stage regression parameters
        firstStage_params = firstStage_params.rename(
            columns={v:k for k,v in namedict.items()})
        params = r[['coef', 'tvalue']].stack()
        params.index = params.index.map('{0[0]} {0[1]}'.format)
        params['adj_r2'] = adj_r2
        params['n'] = n
        return params,firstStage_params
    def _fm(self,x):
        '''
        :param x: a list of lists (several model specifications) or a flat
            list of independent-variable names (one specification)
        :return: None (tables stored in self.results)
        '''
        # NOTE(review): if x[0] is neither str nor list this silently does
        # nothing; and 'fm_para' is overwritten per specification, keeping
        # only the last one.
        if isinstance(x[0],str):
            p, firstStage_params = self.famaMacbeth_reg(x)
            self.results['fm_para']=firstStage_params
            self.results['fm']=p
            # (csv export disabled; results are written by save_results)
        if isinstance(x[0],list):
            ps=[]
            for indeVars in x:
                p,firstStage_params=self.famaMacbeth_reg(indeVars)
                self.results['fm_para']=firstStage_params
                # (csv export disabled; results are written by save_results)
                ps.append(p)
            table = pd.concat(ps, axis=1, keys=range(1, len(x) + 1),sort=True)
            all_indeVars = list(set(var for l_indeVars in x for var in l_indeVars))
            newIndex = [var + ' ' + suffix for var in all_indeVars for suffix in ['coef', 'tvalue']] + \
                       ['Intercept coef', 'Intercept tvalue', 'adj_r2', 'n']
            table = table.reindex(index=newIndex)
            # (csv export disabled; results are written by save_results)
            self.results['fm']=table
    def save_results(self):
        """Write every cached table to one Excel workbook, one sheet per key."""
        # NOTE(review): the ExcelWriter is never saved/closed — the workbook
        # may never be flushed to disk depending on the pandas version; verify.
        excelWriter=pd.ExcelWriter(os.path.join(self.path,'{}_{}.xlsx'.format(self.indicator1,self.indicator2)))
        for k in self.results:
            self.results[k].to_excel(excelWriter,k)
#TODO: wrong!!!! For predictors with accounting data updated annually
| [
"13163385579@163.com"
] | 13163385579@163.com |
0da11d84bb2716fc9df87d34b678a04e4e740506 | 5cac13969210aa062b0c4d51f56111a7400f6b5d | /main.py | 9d47fd89194ceee088182f302a1436fb68e8968d | [] | no_license | hehehe0803/CS498B_senior-project | e7283aec954398bc627bc33c7153a4689cd06650 | 2fab95d922b810ba4a8f3919712ba4787da9e953 | refs/heads/main | 2023-04-01T03:27:29.934757 | 2021-03-29T16:04:02 | 2021-03-29T16:04:02 | 343,339,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | import cv2
from PIL import Image, ImageOps
from keras import models
import numpy as np
def main():
    """Continuously classify webcam frames with a Teachable Machine Keras model.

    Shows the raw frame in a preview window and prints the predicted
    category for every frame. Press 'q' in the window to quit.
    """
    model = models.load_model('./detect_model/keras_model.h5')
    cap = cv2.VideoCapture(0)
    categories = ['Skull', 'Pikachu', 'Man', 'House']

    def preprocess(frame):
        # Wrap the captured ndarray as an RGB PIL image, center-crop/resize
        # to the 224x224 input the model expects, then scale pixels so the
        # batch values lie around [-1, 1].
        image = Image.fromarray(frame, 'RGB')
        image = ImageOps.fit(image, (224, 224), Image.ANTIALIAS)
        pixels = np.asarray(image).astype(np.float32) / 127.0 - 1
        # single-image batch of shape (1, 224, 224, 3)
        batch = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
        batch[0] = pixels
        return batch

    while True:
        # Capture frame-by-frame
        ok, frame = cap.read()
        prediction = model.predict(preprocess(frame))
        print(categories[np.argmax(prediction)])
        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # Run the webcam classification loop only when executed as a script.
    main()
| [
"hehehe0803@gmail.com"
] | hehehe0803@gmail.com |
fa7e40b0bb754bc7b775b514c47ad6387e9aded8 | 1ecb394b10e9622a5a5d8845b44e4585f464d42e | /nncp-rpc/lib/logic/Ticket/jl.py | 6eba7a4aba23fda0584343d0701709d8cb297dec | [] | no_license | dragonflylxp/lottory | 7ec28d196f58692d9d417aa5d6963c182afe260a | b04f115df325a58148dc19d7cdfc21b28892a6a1 | refs/heads/master | 2020-04-28T08:53:09.007092 | 2020-04-17T10:50:41 | 2020-04-17T10:50:41 | 175,145,951 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,125 | py | #encoding=utf-8
import MySQLdb
import traceback
import define
from dbpool import db_pool
from util.tools import Log
from common.dbs import BaseModel, access
from baseticket import BaseTicket
logger = Log().getLog()
class JlTicket(BaseTicket):
    """Ticket persistence for lottery type 47 (Jilin)."""
    def __init__(self):
        # 47 is this lottery's id, passed to the BaseTicket constructor.
        super(JlTicket, self).__init__(47)
    @access("w")
    def save_tickets(self, params):
        """Persist a batch of tickets for one project inside one transaction.

        :param params: dict with keys 'project' (row with f_uid/f_pid/f_lotid),
            'tickets' (list of ticket dicts) and optional 'mid' (message id)
        :return: the project id on success
        :raises Exception: if the message was already consumed, or on any DB
            error (the transaction is rolled back and the error re-raised)
        """
        project = params.get("project")
        tickets = params.get("tickets")
        mid = params.get("mid", None)
        uid = project.get("f_uid")
        pid = project.get("f_pid")
        lotid = project.get("f_lotid")
        try:
            if mid is not None:
                # mark the pending message consumed (status NEW -> DONE);
                # a zero row count means another worker already handled it
                sql = "UPDATE t_msg_record SET f_msgstatus=%s WHERE f_mid=%s AND f_msgstatus=%s"
                ret = self.cursor.execute(sql, (define.MSG_STATUS_DONE, mid, define.MSG_STATUS_NEW))
                if ret < 1:
                    # NOTE(review): logs the literal 28 where lotid seems
                    # intended — verify.
                    logger.warning("Tickets already saved! lotid=%s|pid=%s|mid=%s", 28, pid, mid)
                    raise Exception("Tickets already saved!")
            sql = """
                INSERT INTO t_ticket_jl(
                    f_uid,
                    f_pid,
                    f_lotid,
                    f_wtype,
                    f_ggtype,
                    f_beishu,
                    f_zhushu,
                    f_allmoney,
                    f_fileorcode,
                    f_firstprocessid,
                    f_lastprocessid,
                    f_ticketstatus)
                VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
            """
            args = []
            for tkt in tickets:
                tpl = (uid, pid, lotid, tkt["wtype"], tkt["ggtype"], tkt["beishu"],tkt["zhushu"], tkt["allmoney"],
                       tkt["fileorcode"], tkt["firstprocessid"], tkt["lastprocessid"], define.TICKET_STATUS_SAVED)
                args.append(tpl)
            self.cursor.executemany(sql, args)
            self.conn.commit()
        except Exception as ex:
            # roll back the whole batch and propagate the original error
            logger.error(traceback.format_exc())
            self.conn.rollback()
            raise
        return pid
| [
"noreply@github.com"
] | noreply@github.com |
bfc4a81a2576286e533d2b117dd711bc3d73d013 | 3c27b86f0165ab24e6b04d505e8471e032594f0b | /pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GLES2/EXT/shadow_samplers.py | 119ce82880ccfe3b97741cc729ccd3611e990b3f | [
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.1-or-later",
"GPL-3.0-only",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"MIT"
] | permissive | alexus37/AugmentedRealityChess | 8b9ccdfffc8aee93a86a44b8ef53c034ec6a10d1 | 7f600ad153270feff12aa7aa86d7ed0a49ebc71c | refs/heads/master | 2020-12-24T13:29:21.967833 | 2020-02-27T09:38:50 | 2020-02-27T09:38:50 | 31,264,034 | 1 | 1 | MIT | 2020-02-27T09:38:52 | 2015-02-24T14:36:34 | Python | UTF-8 | Python | false | false | 774 | py | '''OpenGL extension EXT.shadow_samplers
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.shadow_samplers to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/shadow_samplers.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.shadow_samplers import *
from OpenGL.raw.GLES2.EXT.shadow_samplers import _EXTENSION_NAME
def glInitShadowSamplersEXT():
    '''Return boolean indicating whether this extension is available'''
    # Local import mirrors PyOpenGL's autogenerated extension boilerplate;
    # the check queries the currently active GL context.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | [
"alexlelidis@gmx.de"
] | alexlelidis@gmx.de |
e18b37f1f5ca204f05df55aacb6448e4a970b35a | 3a9bad174cf24244add6dc0ae9396a226d923c18 | /pythonStuding/test1/socketServer1.py | 08582ff85e173d071304032a3b63fdf33f17e51c | [] | no_license | ckeyer/outmoded | 334bae2b21c57f5a19365631c1dc4eb982361f2f | c9911f4916d9b9441c1c048697f7158bacb4aad6 | refs/heads/master | 2021-01-01T19:52:35.412067 | 2015-04-02T18:10:30 | 2015-04-02T18:10:30 | 24,946,963 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | #-*- coding: utf-8 -*-
from socket import *
from time import ctime
from time import localtime
import time
# Minimal single-client TCP echo server: accepts one connection at a time,
# echoes each message back prefixed with the sender's address, and shuts
# down when a client sends "quit" (case-insensitive).
HOST=''
PORT=1122 # listening port
BUFSIZ=1024
ADDR=(HOST, PORT)
sock=socket(AF_INET, SOCK_STREAM)
sock.bind(ADDR)
sock.listen(5)
# loop-exit flag, set when a client sends "quit"
STOP_CHAT=False
while not STOP_CHAT:
    print('等待接入,侦听端口:%d' % (PORT))
    tcpClientSock, addr=sock.accept()
    print('接受连接,客户端地址:',addr)
    while True:
        try:
            data=tcpClientSock.recv(BUFSIZ)
        # NOTE(review): bare except swallows every error, including
        # KeyboardInterrupt; prefer `except OSError:` here.
        except:
            #print(e)
            tcpClientSock.close()
            break
        if not data:
            break
        # Python 3 sockets carry bytes, so decode/encode utf-8 explicitly
        #s='%s发送给我的信息是:[%s] %s' %(addr[0],ctime(), data.decode('utf8'))
        # format the current timestamp for the console log
        ISOTIMEFORMAT='%Y-%m-%d %X'
        stime=time.strftime(ISOTIMEFORMAT, localtime())
        s='%s发送给我的信息是:%s' %(addr[0],data.decode('utf8'))
        tcpClientSock.send(s.encode('utf8'))
        print([stime], ':', data.decode('utf8'))
        # a case-insensitive "quit" from the client shuts the server down
        STOP_CHAT=(data.decode('utf8').upper()=="QUIT")
        if STOP_CHAT:
            break
    tcpClientSock.close()
sock.close()
"wangcj1214@gmail.com"
] | wangcj1214@gmail.com |
d75eee747f6822c8a9b790c92c8e2d58fcc5cb27 | fcc58c7486ea4c238a8286b6385b7e1ac0052725 | /kirjasovellus/events.py | d3c2170be70122be72d76ad51e05217af751b24f | [] | no_license | Juboskar/kirjapaivakirja | 087407097cd2d7ed61488d52bae729a0c60c401c | a27405b4051783ff06ee3334c700ef5643d1fb90 | refs/heads/main | 2023-04-27T21:49:48.659102 | 2021-05-09T10:56:57 | 2021-05-09T10:56:57 | 349,436,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | class Event:
    def __init__(self, event_type: str, time, content: tuple):
        """Timeline entry.

        :param event_type: kind of event, e.g. "rating" or "progress"
        :param time: timestamp used for ordering the feed
        :param content: the raw database row backing this event
        """
        self.type = event_type
        self.time = time
        self.content = content
def concatenate_event_lists(ratings: tuple, progress_updates: tuple, limit: int = 30):
    """Merge rating and progress rows into one newest-first event feed.

    Rating rows carry their timestamp at index 5 and progress rows at
    index 4 (matching the column order of the queries that produced them).

    :param ratings: rows representing rating events
    :param progress_updates: rows representing progress events
    :param limit: maximum number of events returned (defaults to 30,
        preserving the previous hard-coded cap)
    :return: up to *limit* Event objects sorted by time, newest first
    """
    events = [Event("rating", row[5], row) for row in ratings]
    events.extend(Event("progress", row[4], row) for row in progress_updates)
    # sort in place, newest first; slicing caps the feed length
    events.sort(key=lambda event: event.time, reverse=True)
    return events[:limit]
"oskari.juvakka@gmail.com"
] | oskari.juvakka@gmail.com |
e610698786f353d5ba7f3030dbe71ec75c1973c3 | ff384d954b2c86a90e9982a444df721d50a3de93 | /boutique_ado/urls.py | ba354b2c0e81d28766434e305e045c8c60287f0a | [] | no_license | davidjbuchanan/boutique_ado_v1 | 7553206033e89f45c6821c4e25c8a184cfe68e58 | e55d519a633ad25833a2270ddd00bff7fa1cdf2f | refs/heads/master | 2023-08-02T20:44:36.996659 | 2020-07-03T09:02:28 | 2020-07-03T09:02:28 | 272,670,738 | 0 | 0 | null | 2021-09-22T19:15:43 | 2020-06-16T09:49:54 | HTML | UTF-8 | Python | false | false | 1,209 | py | """boutique_ado URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Top-level URL table: each prefix hands routing off to the matching app's
# urls module (e.g. 'products/' -> products.urls).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('allauth.urls')),
    path('', include('home.urls')),
    path('products/', include('products.urls')),
    path('bag/', include('bag.urls')),
    # these are top level url path e.g. products/ (what goes in the search bar) and then the reference to the url in the products app
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# NOTE(review): static() serves media only while DEBUG is True; production
# media serving must be configured separately.
| [
"davidjbuchanan@gmail.com"
] | davidjbuchanan@gmail.com |
f49aa08c5251a7ed5487d2372a8c33761a0ce5de | 4ef9b96045d456145c55321b33ecda849c87620c | /bambazeled/modules/python_lib/python_lib.py | 09ea662cad8437c3feffd4d5a35c8c2101849933 | [] | no_license | pickledgator/bambazeled | 58aea1c2a12c0abc165260eb9ca62fdb895e6802 | fd5661173d05cd974910b96984a4a5d7ffe830d7 | refs/heads/master | 2020-04-10T04:10:00.756641 | 2018-12-25T22:40:47 | 2018-12-25T22:40:47 | 160,790,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | #!/usr/bin/env python
class PythonLib:
    """A simple wrap-around counter.

    Starts at a caller-supplied non-negative value and resets to zero
    whenever an increment would push the total to 10000 or beyond (the
    increment is then applied to the fresh count).
    """

    def __init__(self, init=0):
        # Reject negative starting values up front.
        if init < 0:
            raise Exception("Initial value must be positive.")
        self._value = init

    def increment_by(self, value):
        """Add *value*, wrapping to a fresh count when 10000 would be reached."""
        would_wrap = self._value + value >= 10000
        if would_wrap:
            self.reset()
        self._value = self._value + value

    def reset(self):
        """Zero the counter."""
        self._value = 0

    def get_value(self):
        """Return the current count."""
        return self._value
| [
"nic.fischer@postmates.com"
] | nic.fischer@postmates.com |
5f653a3d19775919a62112eb017f0dae4dfaeb77 | 7be2397ccb3cee0f2acc24081934e0ecb146115f | /leetcode/editor/cn/[179]最大数.py | e45f99f17a83b9336b8b7d48da71bc72e295ae1a | [
"BSD-3-Clause"
] | permissive | LuoMaimingS/LeetCode | 77262d424498175ab1f29f97784b0e970c60fae0 | e6b0bb2ab5a958d06416178cfbc1a557b0315338 | refs/heads/master | 2021-05-16T21:24:16.110904 | 2020-08-04T15:45:46 | 2020-08-04T15:45:46 | 250,474,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # 给定一组非负整数,重新排列它们的顺序使之组成一个最大的整数。
#
# 示例 1:
#
# 输入: [10,2]
# 输出: 210
#
# 示例 2:
#
# 输入: [3,30,34,5,9]
# 输出: 9534330
#
# 说明: 输出结果可能非常大,所以你需要返回一个字符串而不是整数。
# Related Topics 排序
# 👍 327 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
class Solution(object):
def largestNumber(self, nums):
"""
:type nums: List[int]
:rtype: str
"""
# leetcode submit region end(Prohibit modification and deletion)
| [
"mp1833013@smail.nju.edu.cn"
] | mp1833013@smail.nju.edu.cn |
ee81876807588c0e2367750bcb141750ead0ac37 | 702737408d2ddfa60c1a0aa94a3a3685a0c5f89e | /Testapp.py | 1eaa3b86e274902818290165e278595ce9a23cf5 | [] | no_license | Thembapa/Kasivore | 5653182f629ae89080fa525e925ad82ec9e30925 | 6eca64ae4d0dd649ad8b8e09f0962914e478aa5f | refs/heads/master | 2021-01-07T01:48:59.087390 | 2020-07-13T19:55:03 | 2020-07-13T19:55:03 | 241,543,139 | 0 | 0 | null | 2020-04-07T20:06:56 | 2020-02-19T05:46:00 | JavaScript | UTF-8 | Python | false | false | 125 | py |
import DistanceCalc
print('distance: ' + str(DistanceCalc.estimatedDistance (-26.3194776,28.255009,-23.8912248,30.327695))) | [
"thembapa@gmail.com"
] | thembapa@gmail.com |
b552ba1a83f2b1221607ee157be3200b3163a86d | 7b7df51427a5bbcd4b144a8aba9fb4c2588550ba | /{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/celery/app.py | 474517def62eb56ec64490e3165c374e28a387cb | [
"MIT"
] | permissive | tegia/cookiecutter-scrapy | 595eefddb5ff0aa20378e8460bc3ab4bb875c109 | 662cf7f99c0003d2a2fe3186c4dc0401e5382e28 | refs/heads/master | 2022-04-09T15:40:35.826637 | 2020-03-25T04:03:05 | 2020-03-25T04:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | from celery import Celery
from {{cookiecutter.project_slug}}.configs import settings
app = Celery('tasks', broker=settings.CELERY_BROKER_URL)
@app.task(name="{{cookiecutter.project_slug}}.pipeline.process_item")
def process_item(item):
"""
Body method is defined here or something else (eg. in django project)
"""
pass | [
"cuongnb14@gmail.com"
] | cuongnb14@gmail.com |
fd21d5692c2978de060bef8554be85f67ab784b6 | 1c12441e8564066688443d56dac8cb8be2e9ae40 | /WiseNews.py | 01e87fc1b41f8c052331b5610a7eb575b05eed02 | [] | no_license | jayvischeng/WiseNewsCopy | 80333bb6bb40ec6ef86dd7db592a60a5b767268f | 5ba2b8aa6f58d966ec8cb1bc748e7d4b2975c661 | refs/heads/master | 2021-05-28T19:59:26.327502 | 2015-07-28T05:55:17 | 2015-07-28T05:55:17 | null | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 13,576 | py | # #-*- encoding: Big5-HKSCS -*-
import requests
from bs4 import BeautifulSoup
import os
import re
import shutil
#import sys
##type = sys.getdefaultencoding()
##reload(sys)
##sys.setdefaultencoding('gbk')
#global base,Start, Interval, articleisinUrlList, Page, path, newfilepath,PageErrorList,TitleErrorList,IndexErrorList
#def Run(url_block1):
def Init(thecookie):
global articleisinUrlList,mycookie,base,Start, Interval, Page, path, newfilepath,PageErrorList,TitleErrorList,IndexErrorList
para=[]
articleisinUrlList=False
base = "http://cmispub.cicpa.org.cn/cicpa2_web/PersonIndexAction.do"
Start = -50
Interval = 50
articleisinUrlList = False
Page = 0
path = os.getcwd()
#mycookie = "__p_scid_CITYULIB_ipaccess=\"CITYULIB@ipaccess|84502399|0|t1ms1|014c46fd1d6f\"; cMACHINECOOKIE=14c27a62e00; JSESSIONID=A17D0828B09A4238F94586DBA3AC07B9.wise19; gallery-simplegallery1=1; __reLoginUrl=\"\"; __lsid=^t1ms1.wisers.com^84502399; cUSERNAME=\"CITYULIB@ipaccess\"; __lst_libwisenews.wisers.net=1427110827442"
mycookie=thecookie
#PageErrorList = []
TitleErrorList = []
IndexErrorList = []
#if not os.path.isdir(newfilepath):
#os.makedirs(newfilepath)
#else:
#shutil.rmtree(newfilepath)
#os.makedirs(newfilepath)
#with open(path + "/PageList", "w")as f1:
#f1.write("PageList\n")
#with open(path + "/ArticleList", "w")as f1:
#f1.write("ArticleList\n")
url_block1 = "TRACK-01605"
para.append(url_block1)
para.append(Page)
para.append(Start)
return para
def GeneratePageListUrl(url_block1,Page,Start):
Pages=2193
while (Page<Pages):
isPageinList = 0
Page = Page + 1
Start = Start + Interval
url_block2 = str(Start)
url_block3 = str(Start + Interval)
url="http://libwisenews.wisers.net/wisenews/content.do?wp_dispatch=menu-content&menu-id=/commons/CFT-HK/DA000-DA003-DA010-/DA000-DA003-DA010-65029-&srp_save&cp&cp_s="+url_block2+"&cp_e="+url_block3
#url = "http://libwisenews.wisers.net/wisenews/content.do?wp_dispatch" \
#"=menu-content&menu-id=/userfolder/" + url_block1 + "-&srp_save&cp&cp_s=" + url_block2 + "&cp_e=" + url_block3
with open(path + "/PageList", "a+")as foutpage:
val_page = foutpage.readlines()
if len(val_page) > 0:
for tab in range(len(val_page)):
if url == val_page[tab]:
isPageinList = 1
break
if isPageinList == 0:
foutpage.write(url + '\n')
else:
foutpage.write(url + '\n')
def GenePageUrl(page_urls):
with open("Count.txt")as fin2:
count_val=fin2.readlines()
ArticleCount=int(count_val[0].strip())
ArticleListCount=int(count_val[1].strip())
#r = requests.get(url)
#mycookie = "__p_scid_CITYULIB_ipaccess=\"CITYULIB@ipaccess|84502399|0|t1ms1|014c46fd1d6f\"; cMACHINECOOKIE=14c27a62e00; JSESSIONID=A17D0828B09A4238F94586DBA3AC07B9.wise19; gallery-simplegallery1=1; __reLoginUrl=\"\"; __lsid=^t1ms1.wisers.com^84502399; cUSERNAME=\"CITYULIB@ipaccess\"; __lst_libwisenews.wisers.net=1427110827442"
#"__p_scid_CITYULIB_ipaccess=\"CITYULIB@ipaccess|84434622|0|t1ms1|014c4168733a\"; cMACHINECOOKIE=14c27a62e00; JSESSIONID=B5381EE186BFB6F87E75C01CD524D2BF.wise18; __reLoginUrl=\"\"; __lsid=^t1ms1.wisers.com^84434622; cUSERNAME=\"CITYULIB@ipaccess\"; __lst_libwisenews.wisers.net=1427024125134"
#tab_pages=2#The max is 283
#total_pages=0#The max is 283*50
TotoalPageList=[]
for tab_pages in range(len(page_urls)):
#if tab_pages>=0:
page_url=page_urls[tab_pages].strip()
myheaders = {"Request-Line":"POST /cicpa2_web/PersonIndexAction.do HTTP/1.1",
"Accept": "text/html, application/xhtml+xml, image/jxr, */*",
"Accept-Encoding":"gzip, deflate",
"Connection": "Keep-Alive",
"Host": "cmispub.cicpa.org.cn",
"Referer": "http://cmispub.cicpa.org.cn/cicpa2_web/PersonIndexAction.do",
"Content-Type": "application/x-www-form-urlencoded",
"Accept-Language":"zh-CN",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Cookie": mycookie
}
#print(page_url)
r = requests.get(page_url, headers=myheaders)
r.encoding = "Big5-HKSCS"
listcontent = r.text
beautiful_content = BeautifulSoup(listcontent)
Temp = beautiful_content.findAll(name='a', attrs={'href': re.compile(r"/wisenews/content\.do.*list")})
#print("The length of Temp is "+str(len(Temp)))
if len(Temp)==0:
continue
TitleList = []
UrlList = []
TempTitleList = []
TempUrlList = []
pattern1 = re.compile(r"/wisenews/content\.do.*text")
pattern2 = re.compile(r"<span class=\"ClipItemTitle\">.*</span>")
for tab in range(len(Temp)):
TempUrlList.append(pattern1.findall((str(Temp[tab]))))
TempTitleList.append(pattern2.findall(str(Temp[tab])))
for tab in range(len(Temp)):
if len(TempUrlList[tab])>0:
UrlList.append(base + str(TempUrlList[tab][0]).replace('amp;', ''))
#else:
#print("111111111111111111")
#break
if len(TempTitleList[tab])>0:
TitleList.append(str(TempTitleList[tab][0]).replace('<span class="ClipItemTitle">', '').replace('</span>', ''))
ArticleCount=ArticleCount+1
if (len(UrlList) == 0):
print("The Length of UrlList is 0 ")
else:
for tab_j in range(len(UrlList)):
try:
TotoalPageList.append(str(TitleList[tab_j])+"@#$"+str(UrlList[tab_j]))
except:
print(str(len(TitleList))+"/////////////////////"+str(len(UrlList))+str(tab_j))
if ArticleCount>=(ArticleListCount+1)*10000:
ArticleListCount+=1
with open(path+"/ArticleList "+str(ArticleListCount)+"~"+str(ArticleListCount+1)+"0000", "w")as fout:
#val = fout.readlines()
for tab1 in range(len(TotoalPageList)):
fout.write(TotoalPageList[tab1]+'\n')
TotoalPageList=[]
"""
if len(val)>0:
print(len(val))
for tab1 in range(len(val)):
val_eachline = val[tab1]
for tab2 in range(len(UrlList)):
if UrlList[tab2] in val_eachline:
continue
else:
#print(TitleList[tab2] + "@#$" + UrlList[tab2]+'1111111111111111111\n')
fout.write(TitleList[tab2] + "@#$" + UrlList[tab2]+'\n')
else:
for tab3 in range(len(UrlList)):
#print(TitleList[tab3] + "@#$" + UrlList[tab3]+'22222222222222222222\n')
fout.write(TitleList[tab3] + "@#$" + UrlList[tab3]+'\n')
"""
with open("Count.txt","w")as fout2:
fout2.write(str(ArticleCount)+'\n')
fout2.write(str(ArticleListCount))
#fout.write(OutputText)
#except:
#TitleErrorList.append(TitleList[tab1])
print("The " + str(ArticleCount) + " ArticleUrl_____is Complete!!!And The Length of Page Url is "+str(len(Temp)))
#print(tab_pages)
#time.sleep(10)
def RunCrawler(mycookie,newfilepath):
if not os.path.isdir(newfilepath):
os.makedirs(newfilepath)
else:
pass
#shutil.rmtree(newfilepath)
#os.makedirs(newfilepath)
myheaders2 = {"Host":"libwisenews.wisers.net",
"Referer":"http://libwisenews.wisers.net/wisenews/index.do?new-login=true",
"Content-type": "text/html; charset=utf-8",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:36.0) Gecko/20100101 Firefox/36.0",
"Cookie":mycookie
}
path=os.getcwd()
filelist=os.listdir(path)
#t=0
for eachfile in filelist:
if "Article" in eachfile:
ArticleCount=1
with open(eachfile) as fin:
print("LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL"+str(eachfile))
for eacharticle in fin:
#print("The "+str(ArticleCount)+" Article_____is Processing!!!")
(Title,Url)=eacharticle.split('@#$')
if not os.path.isfile(newfilepath+"/"+Title):
#t=t+1
#print(Title)
temprequest=requests.get(Url,headers=myheaders2)
soup=BeautifulSoup(temprequest.text)
scripttext=[x.extract() for x in soup.find_all('script')]
OutputText=soup.text.strip().replace('\n','')
pattern3=re.compile(r"\.ftNotice.*}")
OutputText2=pattern3.findall(OutputText)
#print("TTTTTTTTTTTTTTTTTTTTTTTTTTTTTT"+str(OutputText2[0]))
try:
pattern4=re.compile(OutputText2[0])
OutputText=pattern4.sub("###",OutputText)
if '/' in Title:
Title=Title.replace("/","")
elif '\\' in Title:
Title=Title.replace("\\","")
elif '¡]' in Title:
Title=Title.replace("¡]","")
elif '¡^' in Title:
Title=Title.replace("¡^","")
with open(newfilepath+"/"+Title,"w")as fout:
fout.write(OutputText)
except:
pass
#print("The "+str(ArticleCount)+" Article_____is Complete!!!")
ArticleCount=ArticleCount+1
else:
pass
#t=t+1
#print(str(t)+"LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL"+str(eachfile))
#pass
#print("The "+str(ArticleCount)+" Article_____is Completed!!!") #TitleErrorList.append(Title)
else:
print("There is Error when crawlering Url!!!!!!!!!!")
from threading import Thread
import multiprocessing
import time
def worker():
#name = multiprocessing.current_process().name
#print(name, 'Starting')
#global articleisinUrlList,ArticleListCountCount,ArticleCount,mycookie,base,Start, Interval, Page, path, newfilepath,PageErrorList,TitleErrorList,IndexErrorList
para=Init(mycookie)
GeneratePageListUrl(para[0],para[1],para[2])
path=os.getcwd()
with open(path + "/PageList")as fin_pageurl:
page_urls= fin_pageurl.readlines()
#for i in range(283):
#print("hahahha"+str(i))
GenePageUrl(page_urls)
#time.sleep(2)
#print(name, 'Exiting')
def worker2():
Init(mycookie)
name = multiprocessing.current_process().name
print(name, 'I am doing nothing but just print..........................................\\\\\\')
time.sleep(2)
GenePageUrl()
def my_service():
name = multiprocessing.current_process().name
print(name, 'Starting')
print(mycookie)
time.sleep(10)
RunCrawler(mycookie)
#time.sleep(3)
print(name, 'Exiting')
if __name__=="__main__":
global A_count,B_count
#para=Init()
#GenerateUrl(para[0],para[1],para[2])
#RunCrawler(mycookie)
#Thread(target=GenerateUrl(para[0],para[1],para[2]),args=()).start()
#Thread(target=RunCrawler(mycookie),args=()).start()
#service = multiprocessing.Process(name='my_service',
#target=my_service)
#worker_1 = multiprocessing.Process(name='worker_1',target=worker)
#worker_2 = multiprocessing.Process(target=worker2) # default name
# global articleisinUrlList,ArticleListCountCount,ArticleCount,mycookie,base,Start, Interval, Page, path, newfilepath,PageErrorList,TitleErrorList,IndexErrorList
OutputFileFolder = input("Please Input the File Folder that You Want to Save your Files...")
newfilepath = os.getcwd() + '/' + str(OutputFileFolder)
mycookie ="__p_scid_CITYULIB_ipaccess=\"CITYULIB@ipaccess|87409174|0|t2ms1|014d37fb41f3\"; cMACHINECOOKIE=14cf9f4476f; JSESSIONID=30164574E03195955B4EB83A2875DD7A.wise25; __reLoginUrl=\"\"; __lsid=^t2ms1.wisers.com^87409174; cUSERNAME=\"CITYULIB@ipaccess\"; __lst_libwisenews.wisers.net=1431121643978"
with open("Count.txt","w")as fout2:
fout2.write(str(0)+'\n')
fout2.write(str(0))
#worker()
RunCrawler(mycookie,newfilepath)
#worker_2.start()#notice avoid to write duplicate content
#service.start() | [
"designer357@gmail.com"
] | designer357@gmail.com |
db3d7eda1e87530089374a33401a01e549e7a541 | 85731130fd28bf26bcd4a662368e4de255eda8e3 | /python_practice/prac9.py | 07fe3f71dd21cc3c621d15276bf93ca68b6fcfeb | [] | no_license | chanish-teramatrix/python-practiceCodes | 4670b22bd4309fbcd790c686e76e3c720fc84ea4 | 8e7ec8f065fd42516fb5b38534d5998866ec936b | refs/heads/master | 2016-09-06T10:04:29.033646 | 2015-09-04T11:46:45 | 2015-09-04T11:46:45 | 40,706,353 | 0 | 0 | null | 2015-09-02T12:24:45 | 2015-08-14T09:14:40 | Python | UTF-8 | Python | false | false | 92 | py | #!/bin/python
#module2
def even_odd(x):
if x%2 == 0:
print 'even'
else:
print 'odd'
| [
"chanish.agarwal@teramatrix.in"
] | chanish.agarwal@teramatrix.in |
bdc0672a1f40976597bd0ac8992d513ab72c649c | 24eced3f950ca198baa1fa0faa95cfd976e61b84 | /sem1/lista1sol.py | 4af2049360a3d3b9739bf44c974a41d1abb036e6 | [] | no_license | unisinos-labprog/python | a3bc3d3c9bd2971fadb57ef4dcae1ed0a8cc3316 | 33e51796b0511c3ea6001b0c4102355c2c884b99 | refs/heads/master | 2022-12-05T06:26:44.205873 | 2020-08-21T21:51:16 | 2020-08-21T21:51:16 | 284,353,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | #Primeira Lista
import math
# 1 ------------------------------------------------------
print ("Alô mundo!")
# 2 ------------------------------------------------------
num1 = int(input("Qual o primeiro número?"))
num2 = int(input("Qual o segundo número?"))
print("a soma de ",num1," e ",num2," é ", num1 + num2)
print("o produto de ",num1," e ",num2," é ", num1 * num2)
# 3 ------------------------------------------------------
saldoInicial = float(input("Saldo inicial?"))
receita = float(input("Receita?"))
despesa = float(input("Despesa?"))
print("Saldo Final é de ", saldoInicial + receita - despesa)
# 4 ------------------------------------------------------
valor = int(input("Qual o valor?"))
print("a raiz quadrada de ",valor," é ", math.sqrt(valor))
# 5 ------------------------------------------------------
raio = float(input("Qual o raio?"))
altura = float(input("Qual a altura?"))
print("Volume é ", math.pi * raio**2 * altura)
# 6 ------------------------------------------------------
a = int(input("Qual o valor de A?"))
b = int(input("Qual o valor de B?"))
temp = a
a = b
b = temp
print("O valor de A é", a)
print("O valor de B é", b)
# 7 ------------------------------------------------------
tempC = float(input("Qual o temperatura (em Celsius)?"))
print("Temperatura em Fahrenheit = ", (9*tempC+160)/5)
# 8 ------------------------------------------------------
notaGA = float(input("Qual a nota do GA?"))
notaGB = float(input("Qual a nota do GB?"))
print("A média é ", (notaGA+2*notaGB)/3)
| [
"ernie.linds@gmail.com"
] | ernie.linds@gmail.com |
5738b8ee28520b927063ca1c7c865b6ad5628621 | 39f4d05b6bb72f84b4ed0d99c7a3e100ea9fdfa5 | /thekrustykrab/mixlist/migrations/0004_auto_20180327_1514.py | 6fbc8b467b8d33a5e2d214167334ef12f090ba33 | [] | no_license | SuedeOO/326Project1 | 20a386dbe0cc7b03603c7fb8049f21a5dc3c5774 | 54397b718ea144168a8a862bd19c736ea9c413c5 | refs/heads/master | 2021-04-28T15:20:35.033683 | 2018-05-01T23:40:51 | 2018-05-01T23:40:51 | 121,986,898 | 1 | 2 | null | 2018-03-29T21:10:54 | 2018-02-18T20:16:38 | HTML | UTF-8 | Python | false | false | 598 | py | # Generated by Django 2.0.2 on 2018-03-27 19:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mixlist', '0003_auto_20180324_1450'),
]
operations = [
migrations.AddField(
model_name='mix',
name='description',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='mix',
name='like_count',
field=models.IntegerField(default=10),
preserve_default=False,
),
]
| [
"mattrobinson125@gmail.com"
] | mattrobinson125@gmail.com |
3d5d6019a9b7ef49e43d555f842b963c4e47b7ac | a594055a3a38a62db98b2e85ec7113d1b2558c1d | /hangman.py | 58c8128759b7c91b88c5d8e43794fab21dcad991 | [] | no_license | ruddyadam/learning | 003fe08d4cd63ae8ed4e0e239584c2705ba1c7b3 | d9530d83f4387b0cd5055592b2f109ad5e20b45f | refs/heads/master | 2021-01-19T13:36:29.530036 | 2017-03-13T11:22:27 | 2017-03-13T11:22:27 | 82,400,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,231 | py | import sys
pyVersion = sys.version_info[0]
pySubVersion = sys.version_info[1]
message = '\n********\nYou are currently running Python version {0}.{1}\n********\n'
print(message.format(pyVersion,pySubVersion))
if sys.version_info < (3, 0):
raise ValueError("you must use python 3.x")
else:
print("good\n")
guessList = []
tempAnswer = ''
answer = []
strikeList = []
strike = []
letter = ''
done = False
print('Starting game!')
#to allow the user to enter a secret word, uncomment here
word = input('Enter a secret word: ')
print('\n' * 100)
#to hardcode a single word, uncomment here
#word = 'steinsgate'
for i in word:
tempAnswer = tempAnswer + '_'
answer.append('_')
print('%s' % ' '.join(map(str,tempAnswer)))
#print(answer)
while done == False:
if '_' in answer:
print('%s' % ' '.join(map(str, strike)))
letter = input('Guess a letter!\n')
if not letter in guessList:
guessList.append(letter)
#print('guessed letters: ' + )
print('\n')
g = 0 #used to iterate through ther letters of the secret word
t = 0 #used to find if the letter input was in the secret word
answer =[]
for i in tempAnswer:
if i == '_':
if letter == word[g]:
answer.append(letter)
t = t + 1 #this iterates if a new letter was found in the puzzle
else:
answer.append('_')
else:
answer.append(i)
g = g + 1
if t == 0:
strike.append('['+letter+']') #caleb suggested this change to keep track of letters!! 2-27-17
tempAnswer = answer
print('%s' % ' '.join(map(str, answer)))
# elif :
else:
print('You got it!')
print('The secret word is: ' + word)
print('and it only took you ' + str(len(strike)) + ' strikes!')
if len(strike) < 3:
print('Wow!! Great job!')
elif len(strike) < 11:
print("ok, that's ok.")
elif len(strike) < 21:
print('That was bad!')
else:
print('YOU ARE TERRIBLE!!')
input("\n\npress any key to exit...")
done = True | [
"ruddyadam@gmail.com"
] | ruddyadam@gmail.com |
b5fca79b1608f0797b4b9d9f43d800951d1a52d8 | d4c024cc1330aa86582e0e3f25d5c0f76a9ccbe0 | /align/predict.py | 2f6ebbfb812250d45681908e7d093c5c0b37572c | [] | no_license | jeehyun100/insta_crawling | 464d5a90a614ed4aab1ca28566ad87cbda279447 | 39ada39513bc3655adc2e624c786cc6fd8473a7e | refs/heads/master | 2021-09-07T15:41:57.013861 | 2018-02-25T09:32:36 | 2018-02-25T09:32:36 | 118,861,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,797 | py | import os
import cv2
import numpy as np
import tensorflow as tf
from scipy import misc
import align.detect_face as detect_face
#from facenet_tf.src.common import facenet
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import datetime
import dlib
from imutils.face_utils import rect_to_bb
import face_recognition
import matplotlib.pyplot as plt
class face_detect_crawling(object):
def get_boxes_frame( minsize, pnet, rnet,onet, threshold, factor, frame, detect_type, margin):
boxes = []
img_size = np.asarray(frame.shape)[0:2]
if len(img_size) == 0:
return frame, boxes
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
bounding_boxes, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet,
threshold, factor)
for bounding_box in bounding_boxes:
det = np.squeeze(bounding_box[0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
if detect_type == 'dlib':
bb[2] += bb[0]
bb[3] += bb[1]
elif detect_type == 'hog' or detect_type == 'cnn':
bb[1], bb[2], bb[3], bb[0] = bounding_box
if len(boxes) == 0:
boxes.append(bb)
else:
if boxes[0][2] - boxes[0][0] < bb[2] - bb[0]:
boxes[0] = bb
if len(boxes) > 0:
cropped = frame[boxes[0][1]:boxes[0][3], boxes[0][0]:boxes[0][2], :]
else:
cropped = None
return cropped, boxes
def main():
# Arguments #
_detecter = face_detect_crawling()
filename = '/home/dev/insta_crawling/data/2pmhouse/10_20180221064634.jpg'
image = cv2.imread(filename, flags=cv2.IMREAD_COLOR)
config = tf.ConfigProto(device_count={'GPU': 0})
with tf.Session(config=config) as sess:
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
#frame, self.minsize, self.pnet, self.rnet, self.onet,self.threshold, self.factor
minsize = 20
threshold = [0.6, 0.7, 0.7]
factor = 0.709
margin = 90
#image_size = 300
#cropped_size = 30 # rotation use
detect_type = 'mtcnn' # dlib, mtcnn, hog, cnn
rotation = False
aligned, boxes = face_detect_crawling.get_boxes_frame(minsize, pnet, rnet,onet, threshold, factor, image, detect_type, margin)
if aligned != None:
cv2.imshow("Window", aligned);
print("success")
if __name__ == "__main__":
main()
| [
"intwis100@naver.com"
] | intwis100@naver.com |
7e0763d7d153bdb193b2ca91c433979cec233eae | ad3e604a15890eac09cfe0b4e74fe256597ef6f3 | /basic_read/venv/bin/easy_install | 27458d36c72bf2c74cd235ada044227658909880 | [] | no_license | FrauBoes/iot | 02df543b5a5a8c3c27a4486912a6d59652f1ab17 | f78a0e02a54fec44d6a5f3bf4ac53f70d2a117f7 | refs/heads/master | 2020-04-15T05:41:01.634221 | 2019-01-07T13:03:05 | 2019-01-07T13:03:05 | 164,433,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | #!/Users/thelma/Dev/iot/basic_read/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
)
| [
"juliaboes@posteo.de"
] | juliaboes@posteo.de | |
cb5cce812191e1e05ffa5a6fb5f10082f4ebc88a | 8a51c5e98346e3fe98ed8b60e1adc9efacbe0c4e | /aula_29/exemplo_03.py | 8258bce190e50629f1803d4f078332f0f42df45e | [] | no_license | vtp14/aula_desafio_29 | 0491d05ddeb266452d91e8a2d6c8a5e00b568677 | fb8fdd91b412e3ef287cfd4561baa77fd20f72e5 | refs/heads/main | 2023-08-03T22:54:33.652792 | 2021-09-27T18:28:19 | 2021-09-27T18:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | import os
nome = input("Digite seu nome: ")
#int() converte o que foi digitado em um valor inteiro
idade = int(input("Digite sua idade: "))
resp = 100 - idade;
print("------------------------------")
print(" Cadastro ")
print("Nome:",nome)
print("Idade:",idade)
print("Resposta:",resp)
print("------------------------------")
os.system("pause") | [
"vitoria.tobias.paris@gmail.com"
] | vitoria.tobias.paris@gmail.com |
af37f57d01f5ccea972c810929054efcb82c7055 | 85f078686bfe8ca7437e482b9bf016bc4f3327f1 | /users_app/forms.py | 44934162b45ba311ff3176b00f093a80a24fc387 | [] | no_license | saroshkhandev/TaskManager | d9267257b8027fdf8c1e5683e59bb7b01551d599 | afd9d19850db789fc15170f847451ea4f4bf2b89 | refs/heads/master | 2023-06-18T22:25:41.992526 | 2021-07-15T11:07:18 | 2021-07-15T11:07:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class CustomUserCreationForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
| [
"saroshabdullah2013@gmail.com"
] | saroshabdullah2013@gmail.com |
b0000f65f8955a9141b9c9455ff591324ae8ec6d | 6b183b67944b169048a930e34608925fb9abdc3e | /xicam/plugins/imagemixinplugin.py | 66f951c285c0e0940ac64a45eb590950abeb7fcb | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause-LBNL"
] | permissive | ihumphrey/Xi-cam | cef31bba712ebf6d330402b9a7cc24d3d096e2b8 | a033a97c4dac55221167d9c4e914c65e835f015a | refs/heads/master | 2022-05-12T22:10:24.970713 | 2021-05-12T22:29:08 | 2021-05-12T22:29:08 | 190,625,609 | 0 | 0 | NOASSERTION | 2019-06-06T17:52:35 | 2019-06-06T17:52:34 | null | UTF-8 | Python | false | false | 509 | py | """
Nothing useful here!
Why?
Because with the PluginType Plugin, we need to register the SnifferPlugin as an entrypoint for the manager to
collect them. In this case, the only meaningful part is the name of the entrypoint, not what it points to. Of course,
it has to point to something, so...
"""
from .plugin import PluginType
class ImageMixinPlugin():
"""
This is just to direct Xi-cam for how to load these plugins; its not intended to be instantiated or subclassed.
"""
needs_qt = True | [
"ronpandolfi@gmail.com"
] | ronpandolfi@gmail.com |
13bca8e49f1768f4c7f6c5f3376bae5edf40196f | 8c1213dec0be90a1ef3e46f767f626acb22be4b8 | /user_dashboard/user_dashboard/settings.py | f5d135773342a981d6679f868b93d7b7ea7c8d7c | [] | no_license | ccunnin8/django | 6d43de7e151f85469f31a0e5da6dc02dfd5101fd | 11f8afb0a787789a24746acf5f1bfe0f4c24e99a | refs/heads/master | 2022-12-12T11:22:04.415053 | 2018-11-13T19:31:00 | 2018-11-13T19:31:00 | 124,255,284 | 0 | 0 | null | 2022-12-08T00:45:36 | 2018-03-07T15:24:56 | Python | UTF-8 | Python | false | false | 3,174 | py | """
Django settings for user_dashboard project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o+^fxei4e*wvamlie_6x&y&=02zo2gl5!0%a(*!5o6+^0@eklq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.dashboard',
'apps.main',
'apps.users',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'user_dashboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'user_dashboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
}
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"coreyjjc@me.com"
] | coreyjjc@me.com |
95aede88f4655f72af544720480e5151aab0d8a1 | ea15d9742ea322e973e5b41de9d25423eb55a4c8 | /tests/metadata/test_continent_metadata.py | 6a58c4608ba745007649ccfa4cae3aae8dd536f6 | [
"MIT"
] | permissive | Math-Avila/dataviva-api | 53d767b6f24ebf0ee9f9477317e3e19270eafd6e | 5d2756b1003695038be32f58b43b11f5ed3e358e | refs/heads/master | 2020-07-29T04:48:12.915430 | 2018-08-17T18:30:28 | 2018-08-17T18:30:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | from test_base import BaseTestCase
class TestContinent(BaseTestCase):
def test_continent_path(self):
response = self.client.get('/metadata/continent')
self.assert_200(response)
def test_continent_plural_path(self):
response = self.client.get('/metadata/continents')
self.assert_200(response)
def test_continent_fields(self):
response = self.client.get('/metadata/continent')
key, item = response.json.popitem()
self.assertTrue(item.has_key('name_en'))
self.assertTrue(item.has_key('name_pt'))
self.assertTrue(item.has_key('countries'))
def test_continent_item(self):
response = self.client.get('/metadata/continent/eu')
continent = {
'name_en': 'Europe',
'name_pt': 'Europa',
'countries': [111, 150, 17, 195, 23, 232, 245, 246, 247, 251, 259, 271, 275, 293, 301, 355, 359, 37, 375, 379, 386, 388, 427, 440, 442, 445, 449, 452, 467, 494, 495, 498, 538, 573, 603, 607, 628, 670, 697, 72, 737, 764, 767, 791, 831, 848, 85, 87, 98, 367]
}
self.assertEqual(response.json, continent)
| [
"danieltafuri1998@gmail.com"
] | danieltafuri1998@gmail.com |
0723e9d3c2f3ed3348f8962f73db031393fd5949 | c59738ddfb08134af01d75255c4469071a1e135e | /002_Python_data_analysis_from_entry_to_master/ch10_Numpy科学计算库/02_数组的操作_分片_索引_拼接/005_二维数组_水平_竖直分割split.py | 54eb1c3acdf6b7d1c6d0d1128504a00f3cc4eed3 | [] | no_license | FelixZFB/Python_data_analysis | 371a8460da79e8fdb30b10c02b662419b62a5998 | 62f018d88d8454afe65980efd8d771ac8691956a | refs/heads/master | 2020-05-20T14:46:00.606684 | 2020-02-04T14:25:20 | 2020-02-04T14:25:20 | 185,629,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # -*- coding:utf-8 -*-
import numpy as np
# 生成两个形状一样的二维数组
a = np.arange(16).reshape(4, 4)
print(a)
print("*" * 50)
# 水平竖直分割是拼接的反操作
# 竖直分割: 以行分割
# 水平分割: 以列分割
# 竖直分割,指定被分割为几个数组,数要被整除
b = np.vsplit(a, 2)
print(b)
print("*" * 50)
# 水平分割
c = np.hsplit(a, 2)
print(c)
print("*" * 50)
# 也可以直接使用split函数,指定轴号0,作用于列,以行分割,竖直分割列
e = np.split(a, 2, axis=0)
print(e) | [
"18200116656@qq.com"
] | 18200116656@qq.com |
a7cd4412c6af19405b8adf444583b176a579f717 | 37843eb761fbe20b7e369fb71d81cb9d9b3db3e1 | /dvc/cache/base.py | 38e84869c5a77b8c394d89a6804c5fe2caaaf009 | [
"Apache-2.0"
] | permissive | tall-josh/dvc | e88ebd11335fc3da7ff3b472058a12a84b416efc | aed343f77c0f27f1b5f3a828e450614d2ee17b8d | refs/heads/master | 2022-12-09T10:46:34.886051 | 2020-09-08T09:45:19 | 2020-09-08T09:45:19 | 268,058,946 | 0 | 0 | Apache-2.0 | 2020-08-05T09:48:57 | 2020-05-30T10:40:10 | Python | UTF-8 | Python | false | false | 20,595 | py | import json
import logging
from copy import copy
from shortuuid import uuid
import dvc.prompt as prompt
from dvc.exceptions import (
CheckoutError,
ConfirmRemoveError,
DvcException,
MergeError,
)
from dvc.path_info import WindowsPathInfo
from dvc.progress import Tqdm
from dvc.remote.slow_link_detection import slow_link_guard
from ..tree.base import RemoteActionNotImplemented
logger = logging.getLogger(__name__)
STATUS_OK = 1
STATUS_MISSING = 2
STATUS_NEW = 3
STATUS_DELETED = 4
STATUS_MAP = {
# (local_exists, remote_exists)
(True, True): STATUS_OK,
(False, False): STATUS_MISSING,
(True, False): STATUS_NEW,
(False, True): STATUS_DELETED,
}
class DirCacheError(DvcException):
def __init__(self, hash_):
super().__init__(
f"Failed to load dir cache for hash value: '{hash_}'."
)
class CloudCache:
"""Cloud cache class."""
DEFAULT_CACHE_TYPES = ["copy"]
CACHE_MODE = None
def __init__(self, tree):
self.tree = tree
self.repo = tree.repo
self.cache_types = tree.config.get("type") or copy(
self.DEFAULT_CACHE_TYPES
)
self.cache_type_confirmed = False
self._dir_info = {}
def get_dir_cache(self, hash_):
assert hash_
dir_info = self._dir_info.get(hash_)
if dir_info:
return dir_info
try:
dir_info = self.load_dir_cache(hash_)
except DirCacheError:
dir_info = []
self._dir_info[hash_] = dir_info
return dir_info
def load_dir_cache(self, hash_):
path_info = self.tree.hash_to_path_info(hash_)
try:
with self.tree.open(path_info, "r") as fobj:
d = json.load(fobj)
except (ValueError, FileNotFoundError) as exc:
raise DirCacheError(hash_) from exc
if not isinstance(d, list):
logger.error(
"dir cache file format error '%s' [skipping the file]",
path_info,
)
return []
if self.tree.PATH_CLS == WindowsPathInfo:
# only need to convert it for Windows
for info in d:
# NOTE: here is a BUG, see comment to .as_posix() below
relpath = info[self.tree.PARAM_RELPATH]
info[self.tree.PARAM_RELPATH] = relpath.replace(
"/", self.tree.PATH_CLS.sep
)
return d
def changed(self, path_info, hash_info):
"""Checks if data has changed.
A file is considered changed if:
- It doesn't exist on the working directory (was unlinked)
- Hash value is not computed (saving a new file)
- The hash value stored is different from the given one
- There's no file in the cache
Args:
path_info: dict with path information.
hash: expected hash value for this data.
Returns:
bool: True if data has changed, False otherwise.
"""
logger.debug(
"checking if '%s'('%s') has changed.", path_info, hash_info
)
if not self.tree.exists(path_info):
logger.debug("'%s' doesn't exist.", path_info)
return True
hash_ = hash_info.get(self.tree.PARAM_CHECKSUM)
if hash_ is None:
logger.debug("hash value for '%s' is missing.", path_info)
return True
if self.changed_cache(hash_):
logger.debug("cache for '%s'('%s') has changed.", path_info, hash_)
return True
typ, actual = self.tree.get_hash(path_info)
assert typ == self.tree.PARAM_CHECKSUM
if hash_ != actual:
logger.debug(
"hash value '%s' for '%s' has changed (actual '%s').",
hash_,
actual,
path_info,
)
return True
logger.debug("'%s' hasn't changed.", path_info)
return False
def link(self, from_info, to_info):
self._link(from_info, to_info, self.cache_types)
def _link(self, from_info, to_info, link_types):
assert self.tree.isfile(from_info)
self.tree.makedirs(to_info.parent)
self._try_links(from_info, to_info, link_types)
def _verify_link(self, path_info, link_type):
if self.cache_type_confirmed:
return
is_link = getattr(self.tree, f"is_{link_type}", None)
if is_link and not is_link(path_info):
self.tree.remove(path_info)
raise DvcException(f"failed to verify {link_type}")
self.cache_type_confirmed = True
@slow_link_guard
def _try_links(self, from_info, to_info, link_types):
while link_types:
link_method = getattr(self.tree, link_types[0])
try:
self._do_link(from_info, to_info, link_method)
self._verify_link(to_info, link_types[0])
return
except DvcException as exc:
logger.debug(
"Cache type '%s' is not supported: %s", link_types[0], exc
)
del link_types[0]
raise DvcException("no possible cache types left to try out.")
def _do_link(self, from_info, to_info, link_method):
if self.tree.exists(to_info):
raise DvcException(f"Link '{to_info}' already exists!")
link_method(from_info, to_info)
logger.debug(
"Created '%s': %s -> %s", self.cache_types[0], from_info, to_info,
)
def _save_file(self, path_info, tree, hash_, save_link=True, **kwargs):
assert hash_
cache_info = self.tree.hash_to_path_info(hash_)
if tree == self.tree:
if self.changed_cache(hash_):
self.tree.move(path_info, cache_info, mode=self.CACHE_MODE)
self.link(cache_info, path_info)
elif self.tree.iscopy(path_info) and self._cache_is_copy(
path_info
):
# Default relink procedure involves unneeded copy
self.tree.unprotect(path_info)
else:
self.tree.remove(path_info)
self.link(cache_info, path_info)
if save_link:
self.tree.state.save_link(path_info)
# we need to update path and cache, since in case of reflink,
# or copy cache type moving original file results in updates on
# next executed command, which causes md5 recalculation
self.tree.state.save(path_info, hash_)
else:
if self.changed_cache(hash_):
with tree.open(path_info, mode="rb") as fobj:
# if tree has fetch enabled, DVC out will be fetched on
# open and we do not need to read/copy any data
if not (
tree.isdvc(path_info, strict=False) and tree.fetch
):
self.tree.copy_fobj(fobj, cache_info)
callback = kwargs.get("download_callback")
if callback:
callback(1)
self.tree.state.save(cache_info, hash_)
return {self.tree.PARAM_CHECKSUM: hash_}
def _cache_is_copy(self, path_info):
"""Checks whether cache uses copies."""
if self.cache_type_confirmed:
return self.cache_types[0] == "copy"
if set(self.cache_types) <= {"copy"}:
return True
workspace_file = path_info.with_name("." + uuid())
test_cache_file = self.tree.path_info / ".cache_type_test_file"
if not self.tree.exists(test_cache_file):
with self.tree.open(test_cache_file, "wb") as fobj:
fobj.write(bytes(1))
try:
self.link(test_cache_file, workspace_file)
finally:
self.tree.remove(workspace_file)
self.tree.remove(test_cache_file)
self.cache_type_confirmed = True
return self.cache_types[0] == "copy"
def _save_dir(self, path_info, tree, hash_, save_link=True, **kwargs):
dir_info = self.get_dir_cache(hash_)
for entry in Tqdm(
dir_info, desc="Saving " + path_info.name, unit="file"
):
entry_info = path_info / entry[self.tree.PARAM_RELPATH]
entry_hash = entry[self.tree.PARAM_CHECKSUM]
self._save_file(
entry_info, tree, entry_hash, save_link=False, **kwargs
)
if save_link:
self.tree.state.save_link(path_info)
if self.tree.exists(path_info):
self.tree.state.save(path_info, hash_)
cache_info = self.tree.hash_to_path_info(hash_)
self.tree.state.save(cache_info, hash_)
return {self.tree.PARAM_CHECKSUM: hash_}
def save(self, path_info, tree, hash_info, save_link=True, **kwargs):
if path_info.scheme != self.tree.scheme:
raise RemoteActionNotImplemented(
f"save {path_info.scheme} -> {self.tree.scheme}",
self.tree.scheme,
)
hash_ = hash_info[self.tree.PARAM_CHECKSUM]
return self._save(path_info, tree, hash_, save_link, **kwargs)
def _save(self, path_info, tree, hash_, save_link=True, **kwargs):
to_info = self.tree.hash_to_path_info(hash_)
logger.debug("Saving '%s' to '%s'.", path_info, to_info)
if tree.isdir(path_info):
return self._save_dir(path_info, tree, hash_, save_link, **kwargs)
return self._save_file(path_info, tree, hash_, save_link, **kwargs)
# Override to return path as a string instead of PathInfo for clouds
# which support string paths (see local)
def hash_to_path(self, hash_):
return self.tree.hash_to_path_info(hash_)
def changed_cache_file(self, hash_):
"""Compare the given hash with the (corresponding) actual one.
- Use `State` as a cache for computed hashes
+ The entries are invalidated by taking into account the following:
* mtime
* inode
* size
* hash
- Remove the file from cache if it doesn't match the actual hash
"""
# Prefer string path over PathInfo when possible due to performance
cache_info = self.hash_to_path(hash_)
if self.tree.is_protected(cache_info):
logger.debug(
"Assuming '%s' is unchanged since it is read-only", cache_info
)
return False
_, actual = self.tree.get_hash(cache_info)
logger.debug(
"cache '%s' expected '%s' actual '%s'", cache_info, hash_, actual,
)
if not hash_ or not actual:
return True
if actual.split(".")[0] == hash_.split(".")[0]:
# making cache file read-only so we don't need to check it
# next time
self.tree.protect(cache_info)
return False
if self.tree.exists(cache_info):
logger.warning("corrupted cache file '%s'.", cache_info)
self.tree.remove(cache_info)
return True
def _changed_dir_cache(self, hash_, path_info=None, filter_info=None):
if self.changed_cache_file(hash_):
return True
for entry in self.get_dir_cache(hash_):
entry_hash = entry[self.tree.PARAM_CHECKSUM]
if path_info and filter_info:
entry_info = path_info / entry[self.tree.PARAM_RELPATH]
if not entry_info.isin_or_eq(filter_info):
continue
if self.changed_cache_file(entry_hash):
return True
return False
def changed_cache(self, hash_, path_info=None, filter_info=None):
if self.tree.is_dir_hash(hash_):
return self._changed_dir_cache(
hash_, path_info=path_info, filter_info=filter_info
)
return self.changed_cache_file(hash_)
def already_cached(self, path_info):
_, current = self.tree.get_hash(path_info)
if not current:
return False
return not self.changed_cache(current)
def safe_remove(self, path_info, force=False):
if not self.tree.exists(path_info):
return
if not force and not self.already_cached(path_info):
msg = (
"file '{}' is going to be removed."
" Are you sure you want to proceed?".format(str(path_info))
)
if not prompt.confirm(msg):
raise ConfirmRemoveError(str(path_info))
self.tree.remove(path_info)
def _checkout_file(
self, path_info, hash_, force, progress_callback=None, relink=False
):
"""The file is changed we need to checkout a new copy"""
added, modified = True, False
cache_info = self.tree.hash_to_path_info(hash_)
if self.tree.exists(path_info):
logger.debug("data '%s' will be replaced.", path_info)
self.safe_remove(path_info, force=force)
added, modified = False, True
self.link(cache_info, path_info)
self.tree.state.save_link(path_info)
self.tree.state.save(path_info, hash_)
if progress_callback:
progress_callback(str(path_info))
return added, modified and not relink
def _checkout_dir(
self,
path_info,
hash_,
force,
progress_callback=None,
relink=False,
filter_info=None,
):
added, modified = False, False
# Create dir separately so that dir is created
# even if there are no files in it
if not self.tree.exists(path_info):
added = True
self.tree.makedirs(path_info)
dir_info = self.get_dir_cache(hash_)
logger.debug("Linking directory '%s'.", path_info)
for entry in dir_info:
relative_path = entry[self.tree.PARAM_RELPATH]
entry_hash = entry[self.tree.PARAM_CHECKSUM]
entry_cache_info = self.tree.hash_to_path_info(entry_hash)
entry_info = path_info / relative_path
if filter_info and not entry_info.isin_or_eq(filter_info):
continue
entry_hash_info = {self.tree.PARAM_CHECKSUM: entry_hash}
if relink or self.changed(entry_info, entry_hash_info):
modified = True
self.safe_remove(entry_info, force=force)
self.link(entry_cache_info, entry_info)
self.tree.state.save(entry_info, entry_hash)
if progress_callback:
progress_callback(str(entry_info))
modified = (
self._remove_redundant_files(path_info, dir_info, force)
or modified
)
self.tree.state.save_link(path_info)
self.tree.state.save(path_info, hash_)
# relink is not modified, assume it as nochange
return added, not added and modified and not relink
def _remove_redundant_files(self, path_info, dir_info, force):
existing_files = set(self.tree.walk_files(path_info))
needed_files = {
path_info / entry[self.tree.PARAM_RELPATH] for entry in dir_info
}
redundant_files = existing_files - needed_files
for path in redundant_files:
self.safe_remove(path, force)
return bool(redundant_files)
def checkout(
self,
path_info,
hash_info,
force=False,
progress_callback=None,
relink=False,
filter_info=None,
):
if path_info.scheme not in ["local", self.tree.scheme]:
raise NotImplementedError
hash_ = hash_info.get(self.tree.PARAM_CHECKSUM)
failed = None
skip = False
if not hash_:
logger.warning(
"No file hash info found for '%s'. " "It won't be created.",
path_info,
)
self.safe_remove(path_info, force=force)
failed = path_info
elif not relink and not self.changed(path_info, hash_info):
logger.debug("Data '%s' didn't change.", path_info)
skip = True
elif self.changed_cache(
hash_, path_info=path_info, filter_info=filter_info
):
logger.warning(
"Cache '%s' not found. File '%s' won't be created.",
hash_,
path_info,
)
self.safe_remove(path_info, force=force)
failed = path_info
if failed or skip:
if progress_callback:
progress_callback(
str(path_info),
self.get_files_number(
self.tree.path_info, hash_, filter_info
),
)
if failed:
raise CheckoutError([failed])
return
logger.debug("Checking out '%s' with cache '%s'.", path_info, hash_)
return self._checkout(
path_info, hash_, force, progress_callback, relink, filter_info,
)
def _checkout(
self,
path_info,
hash_,
force=False,
progress_callback=None,
relink=False,
filter_info=None,
):
if not self.tree.is_dir_hash(hash_):
return self._checkout_file(
path_info, hash_, force, progress_callback, relink
)
return self._checkout_dir(
path_info, hash_, force, progress_callback, relink, filter_info
)
def get_files_number(self, path_info, hash_, filter_info):
from funcy.py3 import ilen
if not hash_:
return 0
if not self.tree.is_dir_hash(hash_):
return 1
if not filter_info:
return len(self.get_dir_cache(hash_))
return ilen(
filter_info.isin_or_eq(path_info / entry[self.tree.PARAM_CHECKSUM])
for entry in self.get_dir_cache(hash_)
)
def _to_dict(self, dir_info):
return {
entry[self.tree.PARAM_RELPATH]: entry[self.tree.PARAM_CHECKSUM]
for entry in dir_info
}
def _from_dict(self, dir_dict):
return [
{
self.tree.PARAM_RELPATH: relpath,
self.tree.PARAM_CHECKSUM: checksum,
}
for relpath, checksum in dir_dict.items()
]
@staticmethod
def _diff(ancestor, other, allow_removed=False):
from dictdiffer import diff
allowed = ["add"]
if allow_removed:
allowed.append("remove")
result = list(diff(ancestor, other))
for typ, _, _ in result:
if typ not in allowed:
raise MergeError(
"unable to auto-merge directories with diff that contains "
f"'{typ}'ed files"
)
return result
def _merge_dirs(self, ancestor_info, our_info, their_info):
from operator import itemgetter
from dictdiffer import patch
ancestor = self._to_dict(ancestor_info)
our = self._to_dict(our_info)
their = self._to_dict(their_info)
our_diff = self._diff(ancestor, our)
if not our_diff:
return self._from_dict(their)
their_diff = self._diff(ancestor, their)
if not their_diff:
return self._from_dict(our)
# make sure there are no conflicting files
self._diff(our, their, allow_removed=True)
merged = patch(our_diff + their_diff, ancestor, in_place=True)
# Sorting the list by path to ensure reproducibility
return sorted(
self._from_dict(merged), key=itemgetter(self.tree.PARAM_RELPATH)
)
def merge(self, ancestor_info, our_info, their_info):
assert our_info
assert their_info
if ancestor_info:
ancestor_hash = ancestor_info[self.tree.PARAM_CHECKSUM]
ancestor = self.get_dir_cache(ancestor_hash)
else:
ancestor = []
our_hash = our_info[self.tree.PARAM_CHECKSUM]
our = self.get_dir_cache(our_hash)
their_hash = their_info[self.tree.PARAM_CHECKSUM]
their = self.get_dir_cache(their_hash)
merged = self._merge_dirs(ancestor, our, their)
typ, merged_hash = self.tree.save_dir_info(merged)
return {typ: merged_hash}
| [
"noreply@github.com"
] | noreply@github.com |
20b8d06232b4b6e0ab720a34fa4445130ac824f2 | 387d6549a089449c33a40def2cde6aa9287408e4 | /utils/mapiou.py | 0cfd4aa8a2acea053e77c4bca4785772e7611a6f | [] | no_license | rathinad/kgl | b5e4b377ce01efbd6512d2c28edd71b5bb8a7879 | 2d744734d4e27d7a0521bf9b5aad6ab8a6aa1fd4 | refs/heads/master | 2020-03-31T10:53:06.624858 | 2018-10-09T03:28:33 | 2018-10-09T03:28:33 | 152,153,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,075 | py | import numpy as np
def printvalues(aa, bb):
print('Entering printvalues')
sumtotal=0.0
sumtotal2=0.0
scores = np.zeros((0,))
#aaa=numpy.array([numpy.array(xi) for xi in aa])
# bbb=numpy.array([numpy.array(xi) for xi in bb])
for d in aa:
print('Printing first detection')
scores = np.append(scores, d[4])
sumtotal = map_iou(np.expand_dims(d, axis=0), bb, scores)
print ('Printing map for image: ')
print(sumtotal)
sumtotal2+=sumtotal
print('printing total sum')
print(sumtotal2)
#print('length of detections')
#print(len(aa))
print('Exiting printvalues')
# helper function to calculate IoU
def iou(box1, box2):
x11, y11, x12, y12 = box1
x21, y21, x22, y22, sc = box2
#x11, y11, w1, h1 = box1
#x21, y21, w2, h2, sc = box2
#assert w1 * h1 > 0
#assert w2 * h2 > 0
#x12, y12 = x11 + w1, y11 + h1
#x22, y22 = x21 + w2, y21 + h2
#area1, area2 = w1 * h1, w2 * h2
area1, area2 = (x12-x11) * (y12-y11), (x22-x21) * (y22-y21)
xi1, yi1, xi2, yi2 = max([x11, x21]), max([y11, y21]), min([x12, x22]), min([y12, y22])
if xi2 <= xi1 or yi2 <= yi1:
return 0
else:
intersect = (xi2-xi1) * (yi2-yi1)
union = area1 + area2 - intersect
return intersect / union
# simple test
#box1 = [100, 100, 200, 200]
#box2 = [100, 100, 300, 200]
#print(iou(box1, box2))
def map_iou(boxes_true, boxes_pred, scores, thresholds = [0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75]):
"""
Mean average precision at differnet intersection over union (IoU) threshold
input:
boxes_true: Mx4 numpy array of ground true bounding boxes of one image.
bbox format: (x1, y1, w, h)
boxes_pred: Nx4 numpy array of predicted bounding boxes of one image.
bbox format: (x1, y1, w, h)
scores: length N numpy array of scores associated with predicted bboxes
thresholds: IoU shresholds to evaluate mean average precision on
output:
map: mean average precision of the image
"""
# According to the introduction, images with no ground truth bboxes will not be
# included in the map score unless there is a false positive detection (?)
# return None if both are empty, don't count the image in final evaluation (?)
if len(boxes_true) == 0 and len(boxes_pred) == 0:
return None
assert boxes_true.shape[1] == 4 or boxes_pred.shape[1] == 4, "boxes should be 2D arrays with shape[1]=4"
if len(boxes_pred):
assert len(scores) == len(boxes_pred), "boxes_pred and scores should be same length"
# sort boxes_pred by scores in decreasing order
boxes_pred = boxes_pred[np.argsort(scores)[::-1], :]
map_total = 0
# loop over thresholds
for t in thresholds:
matched_bt = set()
tp, fn = 0, 0
for i, bt in enumerate(boxes_true):
matched = False
for j, bp in enumerate(boxes_pred):
#print('Printing j, bt and bp')
#print(j)
#print(bt)
#print(bp)
miou = iou(bt, bp)
if miou >= t and not matched and j not in matched_bt:
matched = True
tp += 1 # bt is matched for the first time, count as TP
matched_bt.add(j)
if not matched:
fn += 1 # bt has no match, count as FN
fp = len(boxes_pred) - len(matched_bt) # FP is the bp that not matched to any bt
m = tp / (tp + fn + fp)
#print('Threshold : %s tp : %s fn : %s fp : %s map %s' % (t,tp,fn,fp,m))
map_total += m
k = map_total / len(thresholds)
#print ('About to return')
#print(k)
return k
# simple test
#boxes_true = np.array([[100, 100, 200, 200]])
#boxes_pred = np.array([[100, 100, 300, 200]])
#scores = [0.9]
#map_iou(boxes_true, boxes_pred, scores) | [
"noreply@github.com"
] | noreply@github.com |
fbd377cdb9cdae0e1c6af09c010ce94ab99a22f3 | 6dec0cbff8fe62be2cf3d352884c32fad3dfdfc9 | /brca/views.py | 876985a96f7d0a85ac04aab2f286903757c02520 | [] | no_license | sarahjamieson/small_apps | 2c74f4dc176e29862c08c6719fe4ba310ccf7f5b | 345e669b1ab02d435e3ca6e5f2bb220c98d0feda | refs/heads/master | 2021-05-03T15:22:31.030427 | 2019-07-24T09:52:07 | 2019-07-24T09:52:07 | 65,740,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,163 | py | from django.shortcuts import render, HttpResponseRedirect, HttpResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse_lazy
from brca.forms import UploadFileForm
import os
from brca.extraction.brca import run_brca_extraction
import StringIO
import zipfile
import time
import socket
import urllib2
# http://stackoverflow.com/questions/15084597/django-error-message-for-login-form - try this for login forms
def user_login(request):
logout(request)
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse_lazy('home'))
else:
messages.ERROR(request, "Disabled account.")
else:
messages.ERROR(request, "Invalid login details.")
return render(request, 'brca/login.html')
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse_lazy('home'))
def handle_uploaded_file(filename):
"""Uploads file in chunks.
:param filename: file to upload.
:return filepath: location of the file which has been uploaded.
"""
filepath = os.path.join('brca/extraction/inputs/', filename.name)
with open(filepath, 'wb') as destination: # wb = file opened for writing in binary mode.
for chunk in filename.chunks():
destination.write(chunk)
return filepath
def download(request):
filepaths = ["brca/extraction/outputs/brca_output_main.csv",
"brca/extraction/outputs/brca_output_polys.csv",
"brca/extraction/outputs/brca_errors.txt"]
zip_subdir = "BrcaExtraction_%s" % time.strftime("%d%m%Y")
zip_filename = "%s.zip" % zip_subdir
s = StringIO.StringIO()
zf = zipfile.ZipFile(s, "w")
for fpath in filepaths:
fdir, fname = os.path.split(fpath)
zip_path = os.path.join(zip_subdir, fname)
zf.write(fpath, zip_path)
zf.close()
response = HttpResponse(s.getvalue(), content_type="application/x-zip-compressed")
response['Content-Disposition'] = 'attachment; filename=%s' % zip_filename
return response
@login_required(login_url=reverse_lazy('login'))
def home(request):
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
filepath = handle_uploaded_file(request.FILES['file'])
try:
run_brca_extraction(filepath, "brca_output", "errors.txt", "/media/sf_sarah_share/BRCA_poly_list.xls")
return render(request, 'brca/home.html', {'form': form, 'complete': True})
except (socket.error, urllib2.URLError):
return render(request, 'brca/home.html', {'form': form, 'error': True})
else:
form = UploadFileForm()
return render(request, 'brca/home.html', {'form': form})
| [
"shjn@localhost.localdomain"
] | shjn@localhost.localdomain |
9583bb87d6d50958ce11e645470522df8b013170 | 144a2a6f2e4a40d746f4908686e5080fcd3ebd79 | /drl-gym/logger.py | 43ba5d6ce80f72d0480a619f413d74c2c2a0b651 | [] | no_license | cxttli/ReinforcementLearningCode | ad6eef7c2980f0a4df40429e6deeadf82bed4030 | b5d092f2b760fafe2a9ffaf31da0e6285441a54f | refs/heads/master | 2021-10-09T18:31:12.773446 | 2019-01-02T06:20:01 | 2019-01-02T06:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,044 | py | #!usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import os
def write(file, value):
file = open(file_path, 'a')
if isinstance(value, int):
file.write()
'''
作用在于:
store_resutl(): 进行数据储存;
reveal_last(): 定期输出该期间的平均值;
write_last(): 定期将平均值结果写入文件,防止数据丢失;
write_final(): 将每次保存的数据进行写入,不止是平均值。
'''
class Logger:
def __init__(self):
self.result_dict = dict()
self.scale_dict = dict()
self.result_iter = dict() # used in reveal_last
self.result_last_iter = dict() # used in reveal_last
self.flag_title = 1
self.final_flag_title = 1
def store_num(self, **kwargs):
for k,v in kwargs.items():
if not(k in self.scale_dict.keys()):
self.scale_dict[k] = []
self.result_iter[k] = 0
if not isinstance(v, np.ndarray):
v = np.array(v)
self.scale_dict[k].append(v)
self.result_iter[k] += 1
def store_result(self, **kwargs): # warning: kwargs must be a dict or using "="
for k,v in kwargs.items():
if not(k in self.result_dict.keys()):
self.result_dict[k] = []
self.result_iter[k] = 0
self.result_last_iter[k] = 0
if not isinstance(v, np.ndarray):
v = np.array(v)
self.result_dict[k].append(v)
self.result_iter[k] += 1
# only reveal, without write file
def reveal_last(self, *args):
# auto reveal last k mean results, where k = reveal_period in main_loop, s.t. k = 1 ...
if len(args) > 0:
for key in args:
if key in self.scale_dict.keys():
print(str(key) , ":" , self.scale_dict[key][self.result_iter[key]-1], end=" , ")
elif key in self.result_dict.keys():
value_last = np.mean(self.result_dict[key][self.result_last_iter[key]:self.result_iter[key]], axis=0)
print(str(key) , ":" , value_last, end=" , ")
else:
raise KeyError(key)
self.result_last_iter[key] = self.result_iter[key]
print("\n")
# reveal all results (lat k mean results)
else:
for key in self.scale_dict.keys():
print(str(key) , ":" , self.scale_dict[key][self.result_iter[key]-1], end=" , ")
for key in self.result_dict.keys():
value_last = np.mean(self.result_dict[key][self.result_last_iter[key]:self.result_iter[key]], axis=0)
print(str(key) , ":" , value_last, end=" , ")
self.result_last_iter[key] = self.result_iter[key]
print(" ")
# # test... dont use
# def reveal_last_value(self, *args):
# # auto reveal last k mean results, where k = reveal_period in main_loop, s.t. k = 1 ...
# if len(args) > 0:
# # reveal
# for key in args:
# assert key in self.result_dict.keys()
# value_last = np.mean(self.result_dict[key][self.result_last_iter[key]:self.result_iter[key]], axis=0)
# print(str(key) , ":" , value_last, end=" , ")
# self.result_last_iter[key] = self.result_iter[key]
# print(" ")
# # reveal all results (lat k mean results)
# else:
# for key in self.result_dict.keys():
# value_last = np.mean(self.result_dict[key][self.result_last_iter[key]:self.result_iter[key]], axis=0)
# print(str(key) , ":" , value_last, end=" , ")
# self.result_last_iter[key] = self.result_iter[key]
# print(" ")
# only write, without reveal
def write_last(self, save_path=os.getcwd(), save_name='result.csv', write_period=1):
self.fl=open(save_path + '/' + save_name, 'a')
# TODO-1: a judge --> diff save_name for diff files
# TODO-2: only write all, next adding *args
# write title
if self.flag_title:
self._write_title(self.fl, self.scale_dict)
self._write_title(self.fl, self.result_dict)
self.fl.write("\n")
self.flag_title = 0
# write value
for key in self.scale_dict.keys():
space_num = self.scale_dict[key][0].size
if space_num == 1:
self.fl.write(str(self.scale_dict[key][self.result_iter[key]-1]) + ",")
else:
for j in range(space_num):
self.fl.write(str(self.scale_dict[key][self.result_iter[key]-1][j]) + ",")
for key in self.result_dict.keys():
space_num = self.result_dict[key][0].size
if space_num == 1:
self.fl.write(str(np.mean(self.result_dict[key][-write_period:], axis=0)) + ",")
else:
for j in range(space_num):
# print(np.mean(self.result_dict[key][-write_period:], axis=0))
# assert True
self.fl.write(str(np.mean(self.result_dict[key][-write_period:], axis=0)[j]) + ",")
# print(str(self.result_dict[key][iter_key][j]))
# print(str(np.mean(self.result_dict[key], axis=0)[j]))
self.fl.write("\n")
# self.fl.flush()
self.fl.close()
def _write_title(self, file, key_list):
for key in key_list.keys():
# print(key_list[key][0])
# print(key_list[key])
# print(key)
space_num = key_list[key][0].size
# print(space_num)
file.write(str(key) + ",")
for j in range(space_num - 1):
file.write(" " + ",")
def reveal_all(self, *args):
# reveal some results based on args
if len(args) > 0:
for key in args:
assert key in self.result_dict.keys()
print(str(key) , ": " , self.result_dict[key])
# reveal all results
else:
for key in self.result_dict.keys():
print(str(key) , ": " , self.result_dict[key])
def write_final(self, save_path=os.getcwd(), save_name='result_all.csv'):
fl=open(save_path + '/' + save_name, 'w')
for key in self.result_dict.keys():
space_num = self.result_dict[key][0].size
value_num = len(self.result_dict[key])
# print(space_num)
fl.write(str(key) + ",")
for j in range(space_num - 1):
fl.write(" " + ",")
fl.write("\n")
# write value
for iter_key in range(value_num):
for key in self.result_dict.keys():
space_num = self.result_dict[key][0].size
if space_num == 1:
fl.write(str(self.result_dict[key][iter_key]) + ",")
else:
for j in range(space_num):
fl.write(str(self.result_dict[key][iter_key][j]) + ",") # TODO-error
# print(str(self.result_dict[key][iter_key][j]))
fl.write("\n")
fl.close()
# # 使用测试:
# Rcd = Logger()
# for i in range(5):
# a = np.random.randn(1)
# b = np.random.randn(2)
# c = np.random.randn(1)
# Rcd.store_result(resA=i)
# Rcd.store_result(resB=b)
# Rcd.store_result(resC=c)
# if (i+1) % 2 == 0:
# # Rcd.write_last(write_period=2)
# print(a)
# print("last")
# Rcd.reveal_last("resA")
# Rcd.write_final()
# a = np.array(np.random.randn(1,2))
# print(type(a))
# print(a)
# print(a.shape)
# a = []
# for j in range(10):
# a.append(j)
# if (j+1) % 2 == 0:
# print("a",a)
# print("seg:",a[-2:])
# print(a)
# # print(a[1:3])
# # print(np.mean(a[1:3]))
# k = 3
# print(a[-k:]) | [
"wuhaolin@126.com"
] | wuhaolin@126.com |
79d0ea95708a040a0b370b8133a364f77aa32bd0 | 8c78491df7b38f23a5176eac558060fbd8131478 | /Book/chap6/Supporting Materials/quadsolnsFull.py | 27b93d35ce7e40d93dd3fa438a25de45c88eaa8e | [
"CC0-1.0"
] | permissive | phaustin/pyman | 84e26228d2cbafb90d0476a7bf0d03c0a57fad3f | ddcebfe85313c854bf71acc9daae13f2dc1c2b6f | refs/heads/master | 2021-01-17T08:46:12.817978 | 2020-02-03T16:52:48 | 2020-02-03T16:52:48 | 40,729,448 | 1 | 1 | CC0-1.0 | 2020-02-03T16:52:49 | 2015-08-14T18:09:20 | Jupyter Notebook | UTF-8 | Python | false | false | 441 | py | import numpy as np
a = float(raw_input("What is the coefficients a? "))
b = float(raw_input("What is the coefficients b? "))
c = float(raw_input("What is the coefficients c? "))
d = b*b - 4.*a*c
if d >= 0.0:
print("Solutions are real")
elif b == 0.0:
print("Solutions are imaginary")
else:
print("Solutions are complex")
a, b, c = a+0.j, b+0.j, c+0.j
dd = np.sqrt(b*b-4*a*c)
xp = (-b + dd)/a
xm = (-b - dd)/a
print(xm, xp)
| [
"pine@nyu.edu"
] | pine@nyu.edu |
cc7764cc0012e3eab4a8c87739f2c1c8c1059480 | e561681c5d4c7bd795e64c19122271909b6e67af | /config/urls.py | 0043f4327b0d070bb6b7e9b11648c151cb363a26 | [] | no_license | gustavodearmas/Sistema-de-Facturacion-Django | 9ef78446483f433874519952623f3f38c32a76a1 | a9cab6d820ef9f11263ea723594b9a8c74fae59e | refs/heads/master | 2023-07-29T11:14:17.289225 | 2021-09-17T05:50:02 | 2021-09-17T05:50:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.urls import path, include
from core.homepage.views import IndexView
from core.login.views import LoginFormView #Login, logoutUser
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('erp/', include('core.erp.urls')),
path('', IndexView.as_view(), name='index'),
path('login/', include('core.login.urls')),
#path('logout/', login_required(logoutUser), name='logout'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"bocettodigital@gmail.com"
] | bocettodigital@gmail.com |
26040b78b0653089fc79de6e48455653ef0d8888 | 2e760c5c926a2a72107d92c1692bbb76cd987dab | /valeur_aleatoire.py | 89e0ea9b9afdac8b6f899be25be16561bfc10ab0 | [] | no_license | JPdiedhiou/git_project | d1a24dd320ca930168a29f4a379e733653541fff | 9885c5d1871e66f689d0034934d76a93a67aab60 | refs/heads/master | 2020-05-07T08:48:52.446414 | 2019-04-11T12:27:30 | 2019-04-11T12:27:30 | 180,163,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | import random
nbre_alea = random.randint(0, 9)
rep_user = None
while rep_user != nbre_alea:
rep_user = int(input("Entrer une valeur entre 0 et 9 :"))
if rep_user < nbre_alea:
print("Le nombre aléatoire est plus petit")
elif rep_user > nbre_alea:
print("Le nombre aléatoire est plus petit")
else:
print("Bravo !! Tu as gagné ")
| [
"diedhioujp@outlook.com"
] | diedhioujp@outlook.com |
e5ce1f784c016a82ea0bdab7ee28be6530a79009 | cf4b0eaca92048c6fff0de933b69b6322f17a073 | /build/lib.linux-x86_64-2.7/keystoneclient/v3/client.py | 8ab4f4181a0e0b4b5542d4814e9d047f0ef345d4 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | MaheshZ/maheshsawaiker | d9935d40b0da2042bc696423418f2a69db495ba8 | b1b5875bfd48a51b6d1eb9c1b76e8eefd41767ff | refs/heads/master | 2021-03-12T22:16:45.220953 | 2014-12-31T05:06:12 | 2014-12-31T05:06:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,314 | py | # Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.auth.identity import v3 as v3_auth
from keystoneclient import exceptions
from keystoneclient import httpclient
from keystoneclient.openstack.common import jsonutils
from keystoneclient.v3.contrib import endpoint_filter
from keystoneclient.v3.contrib import federation
from keystoneclient.v3.contrib import oauth1
from keystoneclient.v3.contrib import trusts
from keystoneclient.v3 import credentials
from keystoneclient.v3 import domains
from keystoneclient.v3 import endpoints
from keystoneclient.v3 import groups
from keystoneclient.v3 import policies
from keystoneclient.v3 import projects
from keystoneclient.v3 import regions
from keystoneclient.v3 import role_assignments
from keystoneclient.v3 import roles
from keystoneclient.v3 import services
from keystoneclient.v3 import users
import logging
_logger = logging.getLogger(__name__)
class Client(httpclient.HTTPClient):
"""Client for the OpenStack Identity API v3.
:param string user_id: User ID for authentication. (optional)
:param string username: Username for authentication. (optional)
:param string user_domain_id: User's domain ID for authentication.
(optional)
:param string user_domain_name: User's domain name for authentication.
(optional)
:param string password: Password for authentication. (optional)
:param string token: Token for authentication. (optional)
:param string domain_id: Domain ID for domain scoping. (optional)
:param string domain_name: Domain name for domain scoping. (optional)
:param string project_id: Project ID for project scoping. (optional)
:param string project_name: Project name for project scoping. (optional)
:param string project_domain_id: Project's domain ID for project
scoping. (optional)
:param string project_domain_name: Project's domain name for project
scoping. (optional)
:param string tenant_name: Tenant name. (optional)
The tenant_name keyword argument is deprecated,
use project_name instead.
:param string tenant_id: Tenant id. (optional)
The tenant_id keyword argument is deprecated,
use project_id instead.
:param string auth_url: Identity service endpoint for authorization.
:param string region_name: Name of a region to select when choosing an
endpoint from the service catalog.
:param string endpoint: A user-supplied endpoint URL for the identity
service. Lazy-authentication is possible for API
service calls if endpoint is set at
instantiation. (optional)
:param integer timeout: Allows customization of the timeout for client
http requests. (optional)
Example::
>>> from keystoneclient.v3 import client
>>> keystone = client.Client(user_domain_name=DOMAIN_NAME,
... username=USER,
... password=PASS,
... project_domain_name=PROJECT_DOMAIN_NAME,
... project_name=PROJECT_NAME,
... auth_url=KEYSTONE_URL)
...
>>> keystone.projects.list()
...
>>> user = keystone.users.get(USER_ID)
>>> user.delete()
"""
version = 'v3'
def __init__(self, **kwargs):
"""Initialize a new client for the Keystone v3 API."""
super(Client, self).__init__(**kwargs)
self.credentials = credentials.CredentialManager(self)
self.endpoint_filter = endpoint_filter.EndpointFilterManager(self)
self.endpoints = endpoints.EndpointManager(self)
self.domains = domains.DomainManager(self)
self.federation = federation.FederationManager(self)
self.groups = groups.GroupManager(self)
self.oauth1 = oauth1.create_oauth_manager(self)
self.policies = policies.PolicyManager(self)
self.projects = projects.ProjectManager(self)
self.regions = regions.RegionManager(self)
self.role_assignments = role_assignments.RoleAssignmentManager(self)
self.roles = roles.RoleManager(self)
self.services = services.ServiceManager(self)
self.users = users.UserManager(self)
self.trusts = trusts.TrustManager(self)
# DEPRECATED: if session is passed then we go to the new behaviour of
# authenticating on the first required call.
if 'session' not in kwargs and self.management_url is None:
self.authenticate()
def serialize(self, entity):
return jsonutils.dumps(entity, sort_keys=True)
def process_token(self, **kwargs):
"""Extract and process information from the new auth_ref.
And set the relevant authentication information.
"""
super(Client, self).process_token(**kwargs)
if self.auth_ref.domain_scoped:
if not self.auth_ref.domain_id:
raise exceptions.AuthorizationFailure(
"Token didn't provide domain_id")
self._process_management_url(kwargs.get('region_name'))
self.domain_name = self.auth_ref.domain_name
self.domain_id = self.auth_ref.domain_id
if self._management_url:
self._management_url = self._management_url.replace('/v2.0', '/v3')
def get_raw_token_from_identity_service(self, auth_url, user_id=None,
username=None,
user_domain_id=None,
user_domain_name=None,
password=None,
domain_id=None, domain_name=None,
project_id=None, project_name=None,
project_domain_id=None,
project_domain_name=None,
token=None,
trust_id=None,
**kwargs):
"""Authenticate against the v3 Identity API.
If password and token methods are both provided then both methods will
be used in the request.
:returns: access.AccessInfo if authentication was successful.
:raises: AuthorizationFailure if unable to authenticate or validate
the existing authorization token
:raises: Unauthorized if authentication fails due to invalid token
"""
try:
if auth_url is None:
raise ValueError("Cannot authenticate without an auth_url")
auth_methods = []
if token:
auth_methods.append(v3_auth.TokenMethod(token=token))
if password:
m = v3_auth.PasswordMethod(user_id=user_id,
username=username,
user_domain_id=user_domain_id,
user_domain_name=user_domain_name,
password=password)
auth_methods.append(m)
if not auth_methods:
msg = "A user and password or token is required."
raise exceptions.AuthorizationFailure(msg)
plugin = v3_auth.Auth(auth_url, auth_methods,
trust_id=trust_id,
domain_id=domain_id,
domain_name=domain_name,
project_id=project_id,
project_name=project_name,
project_domain_id=project_domain_id,
project_domain_name=project_domain_name)
return plugin.get_auth_ref(self.session)
except (exceptions.AuthorizationFailure, exceptions.Unauthorized):
_logger.debug('Authorization failed.')
raise
except exceptions.EndpointNotFound:
msg = 'There was no suitable authentication url for this request'
raise exceptions.AuthorizationFailure(msg)
except Exception as e:
raise exceptions.AuthorizationFailure('Authorization failed: '
'%s' % e)
| [
"mahesh_sawaiker@persistent.co.in"
] | mahesh_sawaiker@persistent.co.in |
bc7c9459c0f70e88e0dde36873b792973860a896 | 1a24def8879972f21d846ffb3813632070e1cf12 | /Chapter06/0602fib-func.py | 79885098128099a23319588b0f10e18295f92798 | [] | no_license | mushahiroyuki/beginning-python | 03bb78c8d3f678ce39662a44046a308c99f29916 | 4d761d165203dbbe3604173c404f70a3eb791fd8 | refs/heads/master | 2023-08-16T12:44:01.336731 | 2023-07-26T03:41:22 | 2023-07-26T03:41:22 | 238,684,870 | 5 | 4 | null | 2023-09-06T18:34:01 | 2020-02-06T12:33:26 | Python | UTF-8 | Python | false | false | 659 | py | #@@range_begin(list1) # ←この行は無視してください。本文に引用するためのものです。
#ファイル名 Chapter06/0602fib-func.py
def fibs(num):
result = [0, 1]
for i in range(num-2):
result.append(result[-2] + result[-1])
return result
#@@range_end(list1) # ←この行は無視してください。本文に引用するためのものです。
#実行
#@@range_begin(list2) # ←この行は無視してください。本文に引用するためのものです。
print(fibs(10))
print(fibs(15))
#@@range_end(list2) # ←この行は無視してください。本文に引用するためのものです。
| [
"hmusha@gmail.com"
] | hmusha@gmail.com |
a69f31046ca1375d93c2580b8e063ae14697a3d1 | eecfb88eea01f9dbb20c34b70f56ccfc353ffa4c | /ml-scikit-learn/data/clustering/kmeans.py | 74422a5d3b77cfd31a40b1610967300cc49f0ad2 | [] | no_license | adgon92/py-datacamp | 22fa029c0d40fae6fbad1172d1541161520be386 | 6ab03c1bd704724f3cdea672248c846e3ad09b42 | refs/heads/master | 2021-01-19T13:33:43.184300 | 2017-03-04T18:21:32 | 2017-03-04T18:21:32 | 82,398,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | from sklearn import cluster
class KMeansCluster:
_INIT = "k-means++"
_N_CLUSTERS = 10
_RANDOM_STATE = 42
def __init__(self, training_data):
self._training_data = training_data
self._clf = cluster.KMeans(
init=KMeansCluster._INIT,
n_clusters=KMeansCluster._N_CLUSTERS,
random_state=KMeansCluster._RANDOM_STATE
)
@property
def clf(self):
return self._clf
@property
def cluster_centers(self):
return self._clf.cluster_centers_
def fit_training_set(self):
self._clf.fit(self._training_data)
def predict_labels(self):
self._clf.predict(self._training_data)
def compute_cluster_centres_and_predict_cluster_index(self):
return self._clf.fit_predict(self._training_data)
| [
"adgon92@gmail.com"
] | adgon92@gmail.com |
94ee6a4e66acd0722e90563e3119991e1303d276 | 37e17c43dda0c4e3842f1158ae586b09f4c0a072 | /src/sd_client/sd_mic.py | 0b74b17dde541e186bcc6c85b83f845238788519 | [
"MIT"
] | permissive | Ryo-Ishibashi416/SP_1802 | 3472eb5e8d75d520a65f4b10006db75ad2fde82b | 46eab9b77b0ec06fb9ef1ba5962dfd7f06889a4f | refs/heads/master | 2020-08-22T03:42:29.174257 | 2019-01-17T16:15:26 | 2019-01-17T16:15:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,465 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
### Sound Detect Client for mic ###############################################
__author__ = "Yoshihiro Kajiki <y-kajiki@ah.jp.nec.com>"
__version__ = "3.11"
__date__ = "Sep 29, 2018"
### Usage: $ python sd_mic.py deviceID ###
###############################################################################
import os
import sys
import threading
import time
import tarfile
import shutil
import numpy as np
import pyaudio as pa
import requests
###############################################################################
def setAudio():
global chunk
global msgLevel
global audioEvent
# pyaudio
p_in = pa.PyAudio()
# find input device
if msgLevel > 2:
print('\nSearching a audio input device ..\n')
sys.stdout.flush()
recording_device = []
for i in range(p_in.get_device_count()):
maxInputChannels = p_in.get_device_info_by_index(i)['maxInputChannels']
if maxInputChannels > 0 and maxInputChannels < 32:
if msgLevel > 3:
print(' Found!: device_index = %d, maxInputChannels = %d' % (i, maxInputChannels))
print(' defaultSampleRate = %d\n\n' % fs)
fs = int(p_in.get_device_info_by_index(i)['defaultSampleRate'])
recording_device.append((i, fs))
# confirm
num_recording_device = len(recording_device)
if num_recording_device == 0:
print("\nError! Can't find any usuable sound input device.")
print(" Check your environment or try other computer.")
sys.exit(1)
if msgLevel > 2:
print('Your computer has %d recording device(s).' % num_recording_device)
sys.stdout.flush()
# check
maxDevice = -1
maxPower = 0
for (idx, fs) in recording_device:
if msgLevel > 2:
print('Checking device #', idx)
sys.stdout.flush()
inStream = startRecording(idx, fs, fs)
audioEvent.wait()
inStream.close()
# convert audio chunk into numpy.ndarray of float
npChunk = np.frombuffer(chunk, dtype=np.int16).astype(np.float) / 2**15
audioEvent.clear()
# Calc avarage intensity
power = 0
for s in npChunk:
power += abs(s)
power /= fs
if msgLevel > 2:
print(' power = ', power)
sys.stdout.flush()
# compair
if power > maxPower:
maxPower = power
maxDevice = idx
if maxDevice < 0:
print("\nError! Can't find any usuable sound input device.")
print(" All input device seems OFF.")
print(" Check your environment or try other computer.")
sys.exit(1)
if msgLevel > 2:
print("\nYour environment is OK.")
print(' use_device_index = ', maxDevice)
print(' SampleRate = ', fs)
sys.stdout.flush()
return maxDevice, fs
def startRecording(deviceIndex, fs, chunkSize):
global msgLevel
# pyaudio
p_in = pa.PyAudio()
bytes = 2
py_format = p_in.get_format_from_width(bytes)
channels = 1
# generate an input stream
inStream = p_in.open(format=py_format,
channels=channels,
rate=fs,
input=True,
frames_per_buffer=chunkSize,
input_device_index=deviceIndex,
stream_callback=callback)
inStream.start_stream()
if msgLevel > 2:
print('Set Microphone: ON\n')
sys.stdout.flush()
return inStream
def callback(in_data, frame_count, time_info, status):
global chunk
global recTime
global audioEvent
in_data = np.frombuffer(in_data, dtype=np.int16)
chunk = in_data.tobytes()
recTime = time.time()
audioEvent.set()
return (in_data, pa.paContinue)
def getSdk(deviceID):
global proxies
global msgLevel
if os.path.exists('sd_sdk.tar.gz'):
os.remove('sd_sdk.tar.gz')
request_url = 'https://' + mngServer + '/api/assets/sdk/' + deviceID + '/sd_sdk.tar.gz'
while True:
if msgLevel > 2:
print('Get SDK')
sys.stdout.flush()
try:
res = requests.get(request_url, timeout=5, stream=True, proxies=proxies)
if (res.status_code > 299):
print('Error! Can not get SDK.')
print(res.text)
sys.exit(1)
if msgLevel > 2:
print(' Got it!')
sys.stdout.flush()
break
except:
if msgLevel > 2:
print(' No responce. Wait for 2 sec.')
sys.stdout.flush()
time.sleep(2)
# save sdk
with open('sd_sdk.tar.gz', 'wb') as file:
res.raw.decode_content = True
shutil.copyfileobj(res.raw, file)
# expand tar
with tarfile.open('sd_sdk.tar.gz', 'r') as tar:
tar.extractall()
os.remove('sd_sdk.tar.gz')
###############################################################################
if __name__ == "__main__":
### Read and Set Configuration ###
import sd_config
deviceID = sd_config.deviceID
msgLevel = sd_config.msgLevel
getSdkFlag = sd_config.getSdkFlag
useEdgeFlag = sd_config.useEdgeFlag
localEdgeFlag = sd_config.localEdgeFlag
eventTriggerFlag = sd_config.eventTriggerFlag
soundLogFlag = sd_config.soundLogFlag
soundLogDir = sd_config.soundLogDir
useHistoryFlag = sd_config.useHistoryFlag
remoteControlFlag = sd_config.remoteControlFlag
senseHatFlag = sd_config.senseHatFlag
mngServer = sd_config.mngServer
logHost = sd_config.logHost
fluentHost = sd_config.fluentHost
fluentPort = sd_config.fluentPort
syncMode = sd_config.syncMode
proxies = sd_config.proxies
### Get commandline variables ###
argvs = sys.argv
argc = len(argvs)
if deviceID == '':
if argc == 2:
deviceID = argvs[1]
else:
sys.exit('\nUsage: $ python %s device_id\n' % argvs[0])
elif argc > 1:
sys.exit('\nUsage: $ python %s\n' % argvs[0])
### Setup audio device ###
audioEvent = threading.Event()
deviceIndex, fsTest = setAudio()
### Create sound detector instance ###
chunkSize = fsTest
chunkTimeLen = chunkSize / fsTest
if getSdkFlag: getSdk(deviceID)
import sd_sdk
sd = sd_sdk.soundDetector(deviceID, fsTest, chunkSize, useEdgeFlag=useEdgeFlag, localEdgeFlag=localEdgeFlag, eventTriggerFlag=eventTriggerFlag, soundLogFlag=soundLogFlag, soundLogDir=soundLogDir, useHistoryFlag=useHistoryFlag, msgLevel=msgLevel, mngServer=mngServer, remoteControlFlag=remoteControlFlag, senseHatFlag=senseHatFlag, logHost=logHost, fluentHost=fluentHost, fluentPort=fluentPort, proxies=proxies, version=__version__)
### Analyze audio signal by sound detector ###
inStream = startRecording(deviceIndex, fsTest, chunkSize)
while inStream.is_active():
if msgLevel > 4:
print('Wait for a audioEvent.')
sys.stdout.flush()
audioEvent.wait()
# convert audio chunk into numpy.ndarray of float
npChunk = np.frombuffer(chunk, dtype=np.int16).astype(np.float) / 2**15
audioEvent.clear()
ret = sd.analyze(npChunk, recTime-chunkTimeLen, syncMode=syncMode)
sys.stdout.flush()
| [
"koji@M-TabataYotaro.local"
] | koji@M-TabataYotaro.local |
1dad61829d18a8e059788638d3cb20363e701722 | 436f4ae19c88212a76ef7e9c36ec4d48be558e78 | /Lab 2/dijkstra1.py | d2038a5091daa6c3d25baf3d2a9cd133ab25ecb5 | [] | no_license | kingstarfly/CZ2101 | a905026113e22823111708a71e08769554d1cce7 | 952b1975cb2d5abf0a4b1eb418e36531557c1311 | refs/heads/main | 2023-03-07T13:28:41.186333 | 2021-02-09T03:20:53 | 2021-02-09T03:20:53 | 337,280,371 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,762 | py | """
Suppose the input graphG = (V, E)is stored inanadjacency matrixand we use an arrayforthe priority queue. Implement the Dijkstra’salgorithmusing this settingand analyze its time complexity with respect to|V|and |E|both theoreticallyand empirically
"""
import math
class Graph_adj_matrix:
# Initialize the matrix
def __init__(self, size: int = None, arr: list = None):
if size is None and arr is None:
print("Either size or arr is required.")
return
self.matrix = []
if arr:
self.matrix = arr
self.size = len(arr)
else:
for i in range(size):
self.adjMatrix.append([0 for j in range(size)])
self.size = size
# Add edges
def add_edge(self, v1, v2):
if v1 == v2:
print("Same vertex %d and %d" % (v1, v2))
self.matrix[v1][v2] = 1
self.matrix[v2][v1] = 1
# Remove edges
def remove_edge(self, v1, v2):
if self.matrix[v1][v2] == 0:
print("No edge between %d and %d" % (v1, v2))
return
self.matrix[v1][v2] = 0
self.matrix[v2][v1] = 0
def print_matrix(self):
print(self.matrix)
def __len__(self):
return self.size
class PriorityQueue_array:
def __init__(self) -> None:
self.queue = []
def add(self, v, distance):
self.queue.append((v, distance))
def edit(self, v, new_distance):
for index, (vertex, distance) in enumerate(self.queue):
if vertex == v:
self.queue[index] = (vertex, new_distance)
def pop_min(self):
smallest_index = 0
for i in range(1, len(self.queue)):
if self.queue[i][1] < self.queue[smallest_index][1]:
smallest_index = i
return self.queue.pop(smallest_index)
def is_empty(self):
return len(self.queue) == 0
def dijkstra_shortest_path(g: Graph_adj_matrix, source: int):
# source is assumed to be the index of the vertex (0 index)
# Dijkstra's algorithm
# Use priority queue to keep track of (vertex, dist from source). Purpose is to select the next closest vertex to source.
pq = PriorityQueue_array()
dist = [] # dist[i] tracks distance of i-th vertex from source vertex.
prev = [] # parent[i] points to parent of i-th vertex in shortest path.
visited = [] # visited[i] tells if i-th vertex has been visited.
for v in range(g.size):
dist.append(math.inf)
prev.append(None)
visited.append(False)
dist[source] = 0
for v in range(g.size):
# Initially, source node will be the min in pq, and the rest will be behind since their distance is inf.
pq.add(v, dist[v]) # v[source] is the distance v is from source
while not pq.is_empty():
(u, d) = pq.pop_min()
visited[u] = True
# For each adjacent vertex of u, check distance and update if needed
for v, edge in enumerate(g.matrix[u]):
if (edge != 0 and visited[v] == False and dist[v] > dist[u] + edge):
new_dist = dist[u] + edge
dist[v] = new_dist
prev[v] = u
pq.edit(v, new_dist)
return dist, prev, visited
if __name__ == "__main__":
matrix = [[0, 4, 0, 0, 0, 0, 0, 8, 0], [4, 0, 8, 0, 0, 0, 0, 11, 0], [0, 8, 0, 7, 0, 4, 0, 0, 2], [0, 0, 7, 0, 9, 14, 0, 0, 0], [
0, 0, 0, 9, 0, 10, 0, 0, 0], [0, 0, 4, 14, 10, 0, 2, 0, 0], [0, 0, 0, 0, 0, 2, 0, 1, 6], [8, 11, 0, 0, 0, 0, 1, 0, 7], [0, 0, 2, 0, 0, 0, 6, 7, 0]]
g = Graph_adj_matrix(arr=matrix)
g.print_matrix()
dist, prev, visited = dijkstra_shortest_path(g, 0)
print(dist)
print(prev)
print(visited)
| [
"xingxiang@twotreesgroup.com"
] | xingxiang@twotreesgroup.com |
548eab73bdde0f861d5c66edaeff558f9c6362e0 | 475d1b83b77e2730b53722f0d8d11b070f97018a | /authapp/migrations/backup/0015_auto_20210226_2036.py | f7748376c6cb26aa40cc60e4db0e3f89b135edda | [
"MIT"
] | permissive | Gwellir/my-region | b651284ee4d4ec7ec892bb78a7ce3444c833d035 | baacb7f54a19c55854fd068d6e38b3048a03d13d | refs/heads/main | 2023-04-20T17:31:33.040419 | 2021-05-17T13:35:38 | 2021-05-17T13:35:38 | 336,533,029 | 0 | 1 | MIT | 2021-05-17T13:35:39 | 2021-02-06T12:31:08 | Python | UTF-8 | Python | false | false | 591 | py | # Generated by Django 3.1.6 on 2021-02-26 17:36
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('authapp', '0014_auto_20210226_2033'),
]
operations = [
migrations.AlterField(
model_name='appuser',
name='activation_key_expiry',
field=models.DateTimeField(default=datetime.datetime(2021, 2, 27, 17, 36, 39, 366149, tzinfo=utc), verbose_name='Крайний срок текущей активации'),
),
]
| [
"gwellir@gmail.com"
] | gwellir@gmail.com |
dccb5669c5b88153b3e54fa816eb2c14f67647eb | aa88548d729211428b3d5d7cfb9c3ba5881e168a | /resilient-sdk/tests/unit/test_cmds/test_dev.py | e757157a1b67107f6abd07b6898790070841f922 | [
"MIT"
] | permissive | svetterIO/resilient-python-api | 784cb83aaff353e8aa6ce0000b241a693977b5b9 | d89440ccee621cb4268ee8ebb350e47e7c9ee26b | refs/heads/master | 2023-08-31T22:15:27.588822 | 2021-10-13T13:15:12 | 2021-10-13T13:15:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,733 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
import sys
import os
import shutil
import pytest
from resilient_sdk.util import package_file_helpers as package_helpers
from resilient_sdk.util.sdk_exception import SDKException
from resilient_sdk.cmds import base_cmd, CmdDev
from tests.shared_mock_data import mock_paths
def test_cmd_dev(fx_get_sub_parser, fx_cmd_line_args_dev_set_version):
cmd_dev = CmdDev(fx_get_sub_parser)
assert isinstance(cmd_dev, base_cmd.BaseCmd)
assert cmd_dev.CMD_NAME == "dev"
assert cmd_dev.CMD_HELP == "Unsupported functionality used to help develop an app"
assert cmd_dev.CMD_USAGE == """
$ resilient-sdk dev -p <path_to_package> --set-version 36.0.0"""
assert cmd_dev.CMD_DESCRIPTION == "WARNING: Use the functionality of 'dev' at your own risk"
args = cmd_dev.parser.parse_known_args()[0]
assert args.package == "fn_main_mock_integration"
def test_set_version_bad_version(fx_get_sub_parser, fx_cmd_line_args_dev_set_bad_version):
cmd_dev = CmdDev(fx_get_sub_parser)
args = cmd_dev.parser.parse_known_args()[0]
with pytest.raises(SDKException, match=r"is not a valid version"):
CmdDev._set_version(args)
def test_set_version(fx_copy_fn_main_mock_integration, fx_get_sub_parser, fx_cmd_line_args_dev_set_version):
mock_integration_name = fx_copy_fn_main_mock_integration[0]
path_fn_main_mock_integration = fx_copy_fn_main_mock_integration[1]
# Replace cmd line arg "fn_main_mock_integration" with path to temp dir location
sys.argv[sys.argv.index(mock_integration_name)] = path_fn_main_mock_integration
# Parse the setup.py file
path_setup_py_file = os.path.join(path_fn_main_mock_integration, package_helpers.BASE_NAME_SETUP_PY)
setup_py_attributes = package_helpers.parse_setup_py(path_setup_py_file, package_helpers.SUPPORTED_SETUP_PY_ATTRIBUTE_NAMES)
# Get customize.py ImportDefinition
path_customize_py = package_helpers.get_configuration_py_file_path("customize", setup_py_attributes)
customize_py_import_definition = package_helpers.get_import_definition_from_customize_py(path_customize_py)
# Get the old_version
old_version = customize_py_import_definition["server_version"]["version"]
assert old_version == "36.0.0"
# Run _set_version
cmd_dev = CmdDev(fx_get_sub_parser)
args = cmd_dev.parser.parse_known_args()[0]
cmd_dev._set_version(args)
# Get the new_version
customize_py_import_definition = package_helpers.get_import_definition_from_customize_py(path_customize_py)
new_version = customize_py_import_definition["server_version"]["version"]
assert new_version == "35.0.0"
| [
"shane.curtin@ie.ibm.com"
] | shane.curtin@ie.ibm.com |
959688bf2a4078f933a1805552a814424616db66 | c5851bd83217d7d0002703659f6ecd0c930b6c9a | /graphene_demo/demo_latticeinfo.py | a35b472dbe94e5a8937ff3cfa485ac3b397e9647 | [] | no_license | schackv/graphene | bd3b53518d7a17ee8d12b09633542640ea49b1dd | 1452f71e0a39977cb02e60fb807d29fa509092d3 | refs/heads/master | 2020-05-17T15:04:44.032619 | 2014-10-17T12:43:48 | 2014-10-17T12:43:48 | 21,196,394 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | # -*- coding: utf-8 -*-
"""
This demo shows how to extract global lattice properties
using the spectral signature in the image.
Created on Fri Jul 25 18:49:53 2014
@author: schackv
"""
import numpy as np
from graphene import *
import matplotlib.pyplot as plt
def demo_latticeinfo():
"""Read an image, retrieve metadata, show the image, estimate parameters
of the lattice using Fourier analysis and visualize the estimates.
"""
## Read DM3 image
DM3 = imtools.dm3image('graphene_regular.dm3')
im = DM3.image()
pixelsize, unit = DM3.pixelsize()
print('Scale according to metadata = {:8f} {:s} per pixel'.format(pixelsize,unit))
# Show image
fig = plt.figure(figsize=(10,10))
plt.matshow(im,fignum=fig.number,cmap=plt.cm.gray)
plt.show(block=False)
## Estimate parameters of lattice
lp = lattice.parameters()
lp.compute(im,options.defaults['lattice'])
print('Estimated hexagonal side length in pixels = {:8f}'.format(lp.t))
print('Rotation in Fourier space = {:4f} radians = {:2f} degrees'.format(lp.theta0, np.rad2deg(lp.theta0)))
# Show lattice power spectrum with found maxima and visualize angle
h, w = lp.PS.shape
plt.matshow(np.log(lp.PS),cmap=plt.cm.gray) # Log of power spectrum
x,y = zip(*lp.fourier_extrema)
xr = lp.fourier_distance * np.cos(lp.theta0)
yr = lp.fourier_distance * np.sin(lp.theta0)
plt.plot(x,y,'o')
plt.plot(np.array([0,xr])+w*0.5,np.array([0,yr])+h*0.5,'-xr')
plt.axis('image')
plt.show(block=True)
if __name__=='__main__':
demo_latticeinfo() | [
"schackv@users.noreply.github.com"
] | schackv@users.noreply.github.com |
c1ca9fea4aec41dcab2df0653fc3476363d164e9 | ecf6fe6aa87b2c3f041acc30fab11b0cafe3dd46 | /architecture_py/archi_v3_9.py | 096d9099c7efed8b00206453651eecc348653e9d | [] | no_license | antgratia/Memoire_code | 73c7806c4576c2e73e00d9a84b1063a2c8f6b559 | 2cdc1339ea24896a6628238f6467edff80f98166 | refs/heads/main | 2023-06-20T16:19:07.041464 | 2021-07-13T11:53:48 | 2021-07-13T11:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,178 | py |
import numpy as np
import os
from keras import backend as K
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import Sequential, Model,load_model
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D, GlobalAveragePooling2D, MaxPool2D, Concatenate, Dropout
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.utils import plot_model
import tensorflow as tf
import sys
import traceback
import csv
from time import time
# Global experiment configuration shared by the block-builder helpers below.
type_archi = 'ALL'          # architecture family tag written to the results CSV
epsilon = 0.001             # BatchNormalization epsilon; 0 disables batch-norm in the helpers
dropout_rate = 0.5          # dropout probability; 0 disables dropout in the helpers
axis = 3                    # channel axis for batch-norm (channels-last image tensors)
compress_factor = 0.5       # DenseNet-style compression factor (unused in the visible code)
# load dataset
(train_x, train_y), (test_x, test_y) = keras.datasets.cifar10.load_data()
# normalize to range 0-1
train_x = train_x / 255.0
test_x = test_x / 255.0
# Held-out slice of the training set (note: training below uses
# validation_split instead of these arrays — presumably leftover; verify).
val_x = train_x[:5000]
val_y = train_y[:5000]
# init training time
training_time = 0
# init result test/train
# Placeholders for the metrics written to the CSV in the finally-block;
# they keep their string defaults if training fails before evaluation.
test_result_loss = ""
test_result_acc = ""
train_result_loss = ""
train_result_acc = ""
nb_layers = "not build"
def id_block(X, f, filters, activation):
    """Residual identity block: 1x1 conv, then fxf conv, plus a skip add.

    Spatial dimensions and channel count are preserved, so the input
    tensor can be added straight onto the convolution output before the
    final activation. Batch-norm layers are inserted only when the
    module-level `epsilon` is non-zero.
    """
    shortcut = X
    # First stage: 1x1 convolution -> (optional) batch-norm -> activation.
    X = Conv2D(filters=filters, kernel_size=(1, 1), strides=(1, 1),
               padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
    if epsilon != 0:
        X = BatchNormalization(epsilon=epsilon, axis=axis)(X)
    X = Activation(activation)(X)
    # Second stage: fxf convolution -> (optional) batch-norm.
    X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1),
               padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
    if epsilon != 0:
        X = BatchNormalization(epsilon=epsilon, axis=axis)(X)
    # Skip connection: merge the untouched input back in, then activate.
    X = Add()([X, shortcut])
    X = Activation(activation)(X)
    return X
def conv_block(X, f, filters, activation, s=2):
    """Residual convolutional block with a projected skip connection.

    The main path down-samples with a strided 1x1 convolution followed by
    an fxf convolution; the shortcut path applies a matching strided 1x1
    convolution so both tensors agree in shape before the Add. Batch-norm
    layers appear only when the module-level `epsilon` is non-zero.
    """
    shortcut = X
    # Main path, stage 1: strided 1x1 conv -> (optional) BN -> activation.
    X = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s),
               padding='valid', kernel_initializer=glorot_uniform(seed=0))(X)
    if epsilon != 0:
        X = BatchNormalization(epsilon=epsilon, axis=axis)(X)
    X = Activation(activation)(X)
    # Main path, stage 2: fxf conv -> (optional) BN.
    X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1),
               padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
    if epsilon != 0:
        X = BatchNormalization(epsilon=epsilon, axis=axis)(X)
    # Shortcut path: project the input to the main path's shape.
    shortcut = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s),
                      padding='valid',
                      kernel_initializer=glorot_uniform(seed=0))(shortcut)
    if epsilon != 0:
        shortcut = BatchNormalization(epsilon=epsilon, axis=axis)(shortcut)
    # Merge and activate.
    X = Add()([X, shortcut])
    X = Activation(activation)(X)
    return X
def denseBlock(X, f, nb_filter, nb_layer, padding, activation):
    """Stack of BN -> activation -> conv -> dropout layers with concatenation.

    NOTE(review): each iteration concatenates with the block's ORIGINAL
    input (`entry`), not with the running feature map, unlike a canonical
    DenseNet dense block — presumably intentional for this generated
    architecture; confirm before reusing elsewhere.
    """
    entry = X
    for _ in range(nb_layer):
        if epsilon != 0:
            X = BatchNormalization(epsilon=epsilon, axis=axis)(X)
        X = Activation(activation)(X)
        X = Conv2D(filters=nb_filter, kernel_size=(f, f),
                   strides=(1, 1), padding=padding)(X)
        if dropout_rate != 0:
            X = Dropout(dropout_rate)(X)
        # Re-attach the block's input along the channel axis.
        X = Concatenate()([X, entry])
    return X
def transition_block(X, f, nb_filter, padding, activation, op, stride):
    """Transition layer: BN -> activation -> conv -> dropout -> pooling.

    `op` selects the pooling flavour: 'avg' for average pooling, anything
    else falls back to max pooling. Batch-norm and dropout are applied
    only when the corresponding module-level globals are non-zero.
    """
    if epsilon != 0:
        X = BatchNormalization(epsilon=epsilon, axis=axis)(X)
    X = Activation(activation)(X)
    X = Conv2D(filters=nb_filter, kernel_size=(f, f),
               strides=(1, 1), padding=padding)(X)
    if dropout_rate != 0:
        X = Dropout(dropout_rate)(X)
    # Both pooling layers take identical keyword arguments, so pick the
    # class first and apply it once.
    pooling_cls = AveragePooling2D if op == 'avg' else MaxPooling2D
    X = pooling_cls(pool_size=f, strides=stride, padding=padding)(X)
    return X
try:
    def getModel():
        """Assemble the dense-block CNN for 32x32x3 input, 10-way softmax."""
        X_input = X = Input([32, 32, 3])
        X = denseBlock(X, 4, 3, 2, 'same', 'tanh')
        X = denseBlock(X, 4, 3, 2, 'same', 'tanh')
        X = denseBlock(X, 4, 3, 2, 'same', 'tanh')
        X = denseBlock(X, 4, 3, 2, 'same', 'tanh')
        X = transition_block(X, 4, 3, 'same', 'tanh', 'avg', 1)
        X = id_block(X, 5, 3, 'tanh')
        X = Conv2D(18, kernel_size=2, strides=1, activation='relu', padding='same')(X)
        X = Conv2D(36, kernel_size=3, strides=3, activation='tanh', padding='same')(X)
        X = MaxPooling2D(pool_size=5, strides=4, padding='same')(X)
        X = denseBlock(X, 7, 36, 1, 'same', 'tanh')
        X = denseBlock(X, 7, 36, 1, 'same', 'tanh')
        X = transition_block(X, 7, 36, 'same', 'tanh', 'avg', 5)
        X = GlobalAveragePooling2D()(X)
        X = Dense(10, activation='softmax')(X)
        model = Model(inputs=X_input, outputs=X)
        return model
    model = getModel()
    #plot_model(model, show_shapes=True, to_file="../architecture_img/archi_v3_9.png")
    model.compile(optimizer='adam', loss=keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
    start = time()
    es = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, restore_best_weights=True, patience=1)
    list_cb = [es]
    history = model.fit(train_x, train_y, epochs=50, batch_size=64, validation_split=0.3, callbacks=list_cb)
    training_time = time()-start
    # Evaluate once per split and reuse the result (the original re-ran
    # model.evaluate up to five times on the same data).
    test_eval = model.evaluate(test_x, test_y)
    print(test_eval)
    log_file = open("../architecture_log/archi_v3_9.log" , "w")
    # save test result
    log_file.write('test result : ' + str(test_eval))
    test_result_loss = test_eval[0]
    test_result_acc = test_eval[1]
    # save train result
    train_eval = model.evaluate(train_x, train_y)
    # BUG FIX: the original logged the *test*-set evaluation under the
    # 'train result' label.
    log_file.write('train result : ' + str(train_eval))
    log_file.write('History train result : ' + str(history.history))
    train_result_loss = train_eval[0]
    train_result_acc = train_eval[1]
    print('OK: file ../architecture_log/archi_v3_9.log has been create')
    nb_layers = len(model.layers)
    log_file.close()
    epochs_run = len(history.history['loss'])
except:
    print('error: file ../architecture_log/archi_v3_9_error.log has been create')
    error_file = open("../architecture_log/archi_v3_9_error.log" , "w")
    traceback.print_exc(file=error_file)
    result_loss = "Error"
    result_acc = "Error"
    # BUG FIX: the finally-block below reads these names; without defaults a
    # failed run raised NameError instead of appending the "Error" row.
    training_time = "Error"
    test_result_loss = "Error"
    test_result_acc = "Error"
    train_result_loss = "Error"
    train_result_acc = "Error"
    nb_layers = "Error"
    epochs_run = "Error"
    error_file.close()
finally:
    # Always append a summary row (real metrics or "Error" placeholders).
    file = open('../architecture_results_v3.csv', 'a', newline ='')
    with file:
        # identifying header
        header = ['file_name', 'training_time(s)', 'test_result_loss', 'test_result_acc', 'train_result_acc', 'train_result_loss', 'nb_layers', 'epochs', 'type_archi']
        writer = csv.DictWriter(file, fieldnames = header)
        # writing data row-wise into the csv file
        # writer.writeheader()
        writer.writerow({'file_name' : 'archi_v3_9',
                    'training_time(s)': training_time,
                    'test_result_loss': test_result_loss,
                    'test_result_acc': test_result_acc,
                    'train_result_acc': train_result_acc,
                    'train_result_loss': train_result_loss,
                    'nb_layers': nb_layers,
                    'epochs' : epochs_run,
                    'type_archi': type_archi})
    print('add line into architecture_results_v3.csv')
    file.close()
| [
"antoine.gratia@student.unamur.be"
] | antoine.gratia@student.unamur.be |
1856ed29d527aef3b9ef5fa008bb36e129ce55b8 | 5ccdd5cd7bc7a247e612575d8d5e809f03aaf927 | /Classification/Naive Bayes Classifier_CGPA.py | 677e396ec9551cab2cb5bb05d65fef5ad4ad99d3 | [] | no_license | vermautkarsh44/CGPA-prediction-system | 45c2e22ea2c5c2e321894edfdb691fc6da8c39b9 | 1182669014b6de031639803d66b4f87bf69a5f34 | refs/heads/main | 2023-07-26T13:44:23.519683 | 2021-09-07T18:54:14 | 2021-09-07T18:54:14 | 373,204,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,177 | py | #Naive Bayes Classification
#importing the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#importing data set
# NOTE(review): assumes "classification data.csv" has 13 columns where
# column 12 is the target ("SGPA 4" label per the plot below) -- TODO confirm.
dataset = pd.read_csv("classification data.csv")
x=dataset.iloc[:,0:12].values
y=dataset.iloc[:,12].values
#label encoder
#one hot encoder
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.compose import ColumnTransformer
# Encode the categorical feature in column 3 as integers.
labelencoder_x=LabelEncoder()
x[:,3]=labelencoder_x.fit_transform(x[:,3])
# One-hot encoding is left disabled; the label-encoded integers are used as-is.
#ct=ColumnTransformer([('one_hot_encoder',OneHotEncoder(),[3])],remainder='passthrough')
#x=ct.fit_transform(x)
#spliting the data set into training and testing sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
# Fit the scaler on the training split only, then apply it to the test split.
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
#Fitting Naive Bayes to the training set
from sklearn.naive_bayes import GaussianNB
classifier=GaussianNB()
classifier.fit(X_train,y_train)
#Predicting the test set results
y_pred=classifier.predict(X_test)
#Making the confusion Matrix
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
#Error calculation
from sklearn import metrics
def print_error(X_test, y_test, model_name):
    # Print MAE / MSE / RMSE of model_name's predictions against y_test.
    # Note: these are regression metrics applied to classifier output.
    prediction = model_name.predict(X_test)
    print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, prediction))
    print('Mean Squared Error:', metrics.mean_squared_error(y_test, prediction))
    print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, prediction)))
print_error(X_test,y_test, classifier)
print('Train Score: ', classifier.score(X_train, y_train))
print('Test Score: ', classifier.score(X_test, y_test))
from sklearn.metrics import r2_score
r2_test = r2_score(y_test, y_pred)
print('R2 score: ',r2_test)
#Graph
# Overlay the true and predicted labels per test sample.
plt.title("Naive Bayes Classifier")
plt.plot(y_test)
plt.plot(y_pred)
plt.xlabel("Number of students")
plt.ylabel("SGPA 4")
plt.legend(["Test Values","Predicted Values"])
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
38f0b26a2112884fba6dfa40814ce6e80173777f | ccdc33e143bc074207f1b5bb124c92e7b8486ab4 | /mysite/blog/migrations/0001_initial.py | 4ab29bc210662ff2e8eef4798db99626b496be37 | [] | no_license | morenge/Djangoblog | b5d1aa22187837eaf50e20228d9586bf71bbb61f | a85fbc79053ef05ea8d1e161da1edb897144008c | refs/heads/master | 2020-05-04T10:11:54.200281 | 2019-04-09T19:15:02 | 2019-04-09T19:15:02 | 179,083,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.1.7 on 2019-04-03 15:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
# Auto-generated initial migration: creates the blog Post model with an
# author foreign key to the project's configured AUTH_USER_MODEL.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # created_date defaults to "now"; published_date stays empty
                # until the post is published.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"michael.orenge@wc.perrillco.local"
] | michael.orenge@wc.perrillco.local |
b6ee81db63eef0b2dd348bc51f31789e4cf0bdfb | 1a266057362b1c69ae9c78a42dc28717cef35111 | /BTCUSDT/CK_Script1_BTCUSDT.py | 512fa6e4dfa527c951434bac388e8582665967c3 | [] | no_license | pranshurastogi/Cryptokart_Market_Making | 1eb3f806124abeecfc1704bdcdcc2d88b6886a6a | a85696072be64d3a89d4a13487459f64ff3ed4f6 | refs/heads/master | 2020-04-30T23:06:34.389889 | 2019-04-14T14:19:11 | 2019-04-14T14:19:11 | 177,136,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,359 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Importing required Libraries
import requests
import random
import json
import time
import sys
from binance.client import Client
from binance.enums import *
# In[2]:
#This function is provided with python-binance in order to get data you have to pass Keys here
#For more info go to this URl -> https://python-binance.readthedocs.io/en/latest/binance.html
client = Client("Binance API key","Binance Secret Key") #No need to provide key here in this file as we are using only public data's

#Function to get Market Price from Binance
def Market_Price_Binance(symbol='BTCUSDT'):
    """Return the last traded price of *symbol* from Binance.

    BUG FIX: the original read ``prices[11]`` and silently returned ``None``
    whenever that fixed slot was not the expected market, which later crashed
    the ``LTP * ...`` arithmetic with a TypeError.  We now search the ticker
    list by symbol; the market is a parameter (default keeps old behaviour).
    """
    prices = client.get_all_tickers()
    for ticker in prices:
        if ticker['symbol'] == symbol:
            LTP_Binance = float(ticker['price'])
            print("Market price of %s -> " % symbol, LTP_Binance)
            return LTP_Binance
    # Fail loudly instead of returning None into price arithmetic.
    raise ValueError("Symbol %s not found in Binance tickers" % symbol)

LTP = Market_Price_Binance() #This value is used in variable Price regulator
# In[3]:
#Variable Declaration
client_id = ""
client_secret= "" # Provide API client secret key to access the data
# NOTE: LTP * 0.1 / 100 is 0.1% of the market price (the old comment said 1%).
price_regulator = (LTP *(.1))/100 # Take 0.1% of Market Price to regulate the spread
# NOTE: LTP * 0.3 / 100 is 0.3% of the market price (the old comment said 3%).
price_regulator2 =(LTP *(.3))/100 # Take 0.3% of Market Price to regulate the spread
market_name = "BTCUSDT" # Enter the Market Name you want to trade in
user_id = 75 # This is id of the user whose client_ID and secret is used
CK_url = "https://test.cryptokart.io:1337/" # Change this URL to go from Staging to Production
# In[ ]:
# Amount Generator variables
# A random integer will be taken b/w lower bound and upper bound and then it is divided with amt_minimiser
#Example for the range of LB and UB -> 2and 20 and amt_minimizer 400
# the range will be 2/400 and 20/400 -> 0.005 and 0.05[Amount will be between these two]
lower_bound_amt = 0.003
upper_bound_amt = 0.212
# Quantity Round OFF integer (decimal places used for order quantities)
Qty_RoundOff = 3
# Price Round OFF integer (decimal places used for order prices)
Price_RoundOff = 2
#Bulk Order qty Initially (orders per side when seeding the book)
bulk_order_qty = 10
# Order qty for regular interval
reg_ord_qty = 1
# If order get sudden wiped off qty, this is replaced by more dynamic thing refer line 223
# sudd_ord_qty = 4
# Minimum order we required in a order book
min_order_length = 20
#Time Quantum for time.sleep() to delay b/w two orders
def time_quantum_delay(seconds=5):
    """Pause between consecutive order operations.

    Generalized: *seconds* is now a parameter; the default of 5 preserves the
    original fixed delay for existing callers.
    """
    time.sleep(seconds)
# In[ ]:
#Functions to perform some operations
# Note = Side == 1 is "Sell" and Side == 2 is "Buy"
# Note = Beside Side everything else is in string
# Fun(1) -> FireOrderLimit this will place limit order as per the given arguments
def FireOrder_Limit(client_id,client_secret,market_name,side,amount,price):
    """Place one limit order on the CryptoKart match engine.

    side: 1 = sell, 2 = buy (int); amount and price are strings.
    Prints the engine's raw JSON response.
    """
    endpoint = CK_url + "matchengine/order/putLimit"
    order_body = {
        "client_id": client_id,
        "client_secret": client_secret,
        "market_name": market_name,
        "side": side,
        "amount": amount,
        "price": price,
    }
    request_headers = {
        'Content-Type': "application/json",
        'cache-control': "no-cache",
    }
    reply = requests.request("POST", endpoint, data=json.dumps(order_body), headers=request_headers)
    print(reply.text)
# Demo Function Call
# FireOrder_Limit(client_id,client_secret,market_name,1,"0.03","4333")
# Fun(2) -> It will cancel all order at once
def CancelAllOrders(client_id,client_secret):
    """Cancel every open order for this API client and log the response."""
    endpoint = CK_url + "matchengine/order/cancelAll"
    request_headers = {
        'Content-Type': "application/json",
        'cache-control': "no-cache",
    }
    credentials = {"client_id": client_id, "client_secret": client_secret}
    reply = requests.request("POST", endpoint, data=json.dumps(credentials), headers=request_headers)
    print(reply.text)
    print("\n \n \t WARNING : ALL ORDERS HAS BEEN CANCELLED\n \n")
# Fun(3) -> Pending user Details, It will give details of all the pending orders
# This function is modified to also Provide Number of orders left and generate a random Order ID
def Pending_Orders_User(client_id,client_secret,market_name,user_id):
    """Return ``(random_pending_order_id, number_of_pending_orders)``.

    BUG FIX: the original drew ``random.randint(0, n) - 1`` as a list index,
    which is non-uniform (index -1, i.e. the last record, was twice as likely)
    and raised IndexError when the order book was empty.  We now pick
    uniformly with ``random.choice`` and return ``(None, 0)`` when there are
    no pending orders.
    """
    url = CK_url+"matchengine/order/pending"
    payload = ({"client_id" :client_id,
                "client_secret":client_secret,
                "market_name" :market_name,
                "offset" : 0,
                "limit" : 100,
                "user_id": user_id
                })
    headers = {
        'Content-Type': "application/json",
        'cache-control': "no-cache"
        }
    response = requests.request("POST", url, data=json.dumps(payload), headers=headers)
    records = response.json()['result']['records']
    No_of_orders = len(records)
    if No_of_orders == 0:
        # Nothing pending: no id to pick; callers check the count first.
        return None, No_of_orders
    random_order_id = random.choice(records)['id']
    print(random_order_id)
    return random_order_id, No_of_orders
# Demo Function Call
# Pending_Orders_User(client_id,client_secret,market_name,user_id)
# Fun(4) -> Cancel Order , It will cancel a order provided with orderID
def Cancel_Order(client_id,client_secret,market_name,order_id):
    """Cancel a single order by id and log the engine's response."""
    endpoint = CK_url + "matchengine/order/cancel"
    cancel_body = {
        "client_id": client_id,
        "client_secret": client_secret,
        "market_name": market_name,
        "order_id": order_id,
    }
    request_headers = {
        'Content-Type': "application/json",
        'cache-control': "no-cache",
    }
    reply = requests.request("POST", endpoint, data=json.dumps(cancel_body), headers=request_headers)
    print(reply.text,"\n \n \t WARNING : THIS ORDER HAS BEEN CANCELLED " , order_id)
# In[ ]:
# Now we have two Function to place Bulk orders of Buy and Sell
def Bulk_Buy_Order(No_of_Orders,arg1_qty_int,arg2_qty_int,arg1_Price_int,arg2_Price_int,market_name):
    """Place No_of_Orders buy-side (side=2) limit orders, drawing quantity and
    price uniformly from [arg1_qty_int, arg2_qty_int] and
    [arg1_Price_int, arg2_Price_int] and rounding per the module settings."""
    for _ in range(No_of_Orders):
        qty_str = str(round(random.uniform(arg1_qty_int, arg2_qty_int), Qty_RoundOff))
        price_str = str(round(random.uniform(arg1_Price_int, arg2_Price_int), Price_RoundOff))
        FireOrder_Limit(client_id, client_secret, market_name, 2, qty_str, price_str)
def Bulk_Sell_Order(No_of_Orders,arg1_qty_int,arg2_qty_int,arg1_Price_int,arg2_Price_int,market_name):
    """Place No_of_Orders sell-side (side=1) limit orders, drawing quantity and
    price uniformly from [arg1_qty_int, arg2_qty_int] and
    [arg1_Price_int, arg2_Price_int] and rounding per the module settings."""
    for _ in range(No_of_Orders):
        qty_str = str(round(random.uniform(arg1_qty_int, arg2_qty_int), Qty_RoundOff))
        price_str = str(round(random.uniform(arg1_Price_int, arg2_Price_int), Price_RoundOff))
        FireOrder_Limit(client_id, client_secret, market_name, 1, qty_str, price_str)
# Seed the order book: fetch the market price once, then place
# bulk_order_qty orders on each side around it.
LTP = Market_Price_Binance() # Will store LTP from Binance
Bulk_Buy_Order(bulk_order_qty,lower_bound_amt,upper_bound_amt,LTP-price_regulator2,LTP-price_regulator,market_name)
print("\n \n \t BULK BUY ORDERS HAS BEEN PLACED \n \n")
Bulk_Sell_Order(bulk_order_qty,lower_bound_amt,upper_bound_amt,LTP+price_regulator,LTP + price_regulator2,market_name)
print("\n \n \t BULK SELL ORDERS HAS BEEN PLACED \n \n")
# In[ ]:
# This is where Magic Happens , This function will run continously and place and cancel order accordingly
def Trade_Order_Limit():
    """Market-making loop: while the book has more than min_order_length
    orders, churn it (cancel one random order, place one fresh order per
    side); otherwise top the book back up.  Runs until Ctrl-C."""
    while(True):
        try:
            # Count of this user's pending orders (index 1 of the tuple).
            Order_len = Pending_Orders_User(client_id,client_secret,market_name,user_id)[1]
            if(Order_len > min_order_length):
                LTP = Market_Price_Binance() # Will store LTP from Binance
                order_id = Pending_Orders_User(client_id,client_secret,market_name,user_id)[0] # Random Order ID generated
                Cancel_Order(client_id,client_secret,market_name,order_id)
                Bulk_Buy_Order(reg_ord_qty,lower_bound_amt,upper_bound_amt,LTP-price_regulator2,LTP-price_regulator,market_name)
                time_quantum_delay()
                order_id = Pending_Orders_User(client_id,client_secret,market_name,user_id)[0]
                Cancel_Order(client_id,client_secret,market_name,order_id)
                Bulk_Sell_Order(reg_ord_qty,lower_bound_amt,upper_bound_amt,LTP+price_regulator,LTP + price_regulator2,market_name)
                time_quantum_delay()
            else:
                print(Order_len)
                LTP = Market_Price_Binance() # Will store LTP from Binance
                # Refill roughly half the shortfall per side (one buy + one
                # sell per loop iteration below).
                value = min_order_length - Order_len
                value = (int(value/2)) + 1
                for i in range(value):
                    Bulk_Buy_Order(1,lower_bound_amt,upper_bound_amt,LTP-price_regulator2,LTP-price_regulator,market_name)
                    Bulk_Sell_Order(1,lower_bound_amt,upper_bound_amt,LTP+price_regulator,LTP + price_regulator2,market_name)
        except KeyboardInterrupt:
            # CancelAllOrders(client_id,client_secret)
            print ("May the force with you")
            sys.exit()
        except Exception as e:
            # Deliberate best-effort: log any API/network failure and keep
            # the loop alive.
            # CancelAllOrders(client_id,client_secret)
            print("\n \n \t \t EXCEPTION FOUND !! BE CAUTIOUS \n \n \n")
            print(e)
            continue
Trade_Order_Limit()
| [
"pranshurastogi3196@gmail.com"
] | pranshurastogi3196@gmail.com |
9b6dd8457bbe227e389eafff6035c5c0e64fe5ab | 310c88aed28df25637f60e41a067885371d0c644 | /ttn/ttnTest1.py | 806ce419ad62420b4d78b3875cd5c1a0c609bf29 | [] | no_license | juankgp/pythonPoo2 | 1757644d2db98827d3fb49e0782435d2408d170c | eb06240f17a64666be7f1161503c0117dd3d97aa | refs/heads/master | 2022-12-30T20:43:29.020081 | 2020-10-24T03:27:15 | 2020-10-24T03:27:15 | 282,591,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | import time
import ttn
# TTN (The Things Network) application credentials and target device.
# SECURITY NOTE(review): this access key is committed in plain text; it
# should be rotated and loaded from the environment instead.
app_id = "wisnodearduino1"
access_key = "ttn-account-v2.QAT09KX6NTZPal64F1iwMQvTB5kHgQj__nP0etRhSek"
dev_id = "wisnode1"
# Toggle flag flipped on each uplink from the button device.
estado = True
def uplink_callback(msg,client):
    """Handle an uplink message: when it comes from device 'boton4200',
    flip the module-level ``estado`` flag and schedule the matching
    downlink payload to ``dev_id``."""
    global estado
    print("Uplink recibido de: ",msg.dev_id)
    if msg.dev_id == "boton4200" :
        print(msg)
        # "Qg==" while estado is True, "Qw==" once it has been toggled off.
        payload = "Qg==" if estado else "Qw=="
        estado = not estado
        client.send(dev_id, payload, port=1, conf=False, sched="replace")
handler = ttn.HandlerClient(app_id,access_key)
# using the MQTT client
mqtt_client = handler.data()
mqtt_client.set_uplink_callback(uplink_callback)
mqtt_client.connect()
#time.sleep(60)
# Keep the process alive so the MQTT callback keeps firing; Ctrl-C exits.
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    pass
"jukyarosinc@gmail.com"
] | jukyarosinc@gmail.com |
530471f037bc00e60919e8f72ddc975963f1b9f1 | b06e865affafcf88712462320119e5fb296a10f1 | /v7/VRP/script.py | a658b4254c12432d032e0f57ebf9513631d9d19f | [] | no_license | Kanchii/VRP-Python | fb08478fc1be97e46dcc9e78520033bc275f1cb4 | a66461487f1cd33a24e19e518a906da95b360245 | refs/heads/master | 2021-07-05T02:44:28.453805 | 2019-04-07T16:53:00 | 2019-04-07T16:53:00 | 145,447,604 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | import os
import subprocess
tamanho = [10, 9, 16, 13]
media = 0
for j in range(10):
tot = 0
for i, tam in enumerate(tamanho):
# os.system("python main.py {} {}".format(tam, i))
result = subprocess.Popen(['python', 'main.py', str(tam), str(i)], stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
total_distance = float(result.communicate()[0].replace('\n', ''))
print("Resultado para o deposito #{}: {}".format(i + 1, total_distance))
tot += total_distance
print("Resultado da execucao #{}: {}".format(j + 1, tot))
media += tot
print("Media final depois de 10 execucoes: {}".format(media / 10.0))
| [
"weisslipe@gmail.com"
] | weisslipe@gmail.com |
3f27f3ccb024ec56a7fe0313c0150ecfcf0d8ccb | 8818117553314078f4143eba999c42f01d2176e9 | /envv/bin/mako-render | 315fe4c0e9e1d7bc33286ecd372b8ba3358f5cc2 | [] | no_license | bolgarun/hello | ea3214373e54d2880491d90ef5aea58fb66474eb | 5378ac053c730a1634d07b0e0ecdf3c53cb41df2 | refs/heads/master | 2020-05-21T00:29:09.935650 | 2019-05-14T09:36:44 | 2019-05-14T09:36:44 | 185,827,334 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | #!/home/user/Документи/Global/MYprogect/envv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from mako.cmd import cmdline
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(cmdline())
| [
"bolgarun21@gmail.com"
] | bolgarun21@gmail.com | |
57ec71f8f366f169baa43555a895ff8842a42839 | c3a3beda6fe3a9bbd5b240477f542a46dd92823a | /functions/TH/08_keyword_args.py | c76f034292ded2cad08e665202843d5d48be93cc | [] | no_license | nfarnan/cs001X_examples | 2e64955b705c8ac9c4319becf6344d36b9560e78 | 80b612249fa97ff685f345582f184d57f94bff8e | refs/heads/master | 2020-12-11T14:06:00.890074 | 2020-04-14T19:41:02 | 2020-04-14T19:41:02 | 209,681,009 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | def kw_test(a=1, b=2):
print(a, b)
kw_test()
kw_test(5, 10)
kw_test(5)
kw_test(b=10)
# this will error
#kw_test(5, 10, 20)
| [
"nlf4@pitt.edu"
] | nlf4@pitt.edu |
c18790f1c9ea9c59ebe70356fd6eafa773ba7a3f | 32ef8621468095bf9c6dd912767cb97e9863dc25 | /algorithms/kaprekar-numbers.py | d4e44e799005d22fc4109908b61ebb0ee1e5e43c | [] | no_license | Seungju182/Hackerrank | 286f1666be5797c1d318788753245696ef52decf | 264533f97bcc8dc771e4e6cbae1937df8ce6bafa | refs/heads/master | 2023-08-17T22:49:58.710410 | 2021-10-25T09:40:46 | 2021-10-25T09:40:46 | 337,652,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'kaprekarNumbers' function below.
#
# The function accepts following parameters:
# 1. INTEGER p
# 2. INTEGER q
#
def kaprekarNumbers(p, q):
    """Print every (modified) Kaprekar number in [p, q] on one line, or
    'INVALID RANGE' when the interval contains none.

    A number n with d digits qualifies when splitting n**2 at 10**d gives
    two parts that sum back to n (e.g. 45 -> 2025 -> 20 + 25).
    """
    found = []
    for candidate in range(p, q + 1):
        split = 10 ** len(str(candidate))
        left, right = divmod(candidate * candidate, split)
        if left + right == candidate:
            found.append(candidate)
    if found:
        print(*found)
    else:
        print("INVALID RANGE")
if __name__ == '__main__':
    # HackerRank-style driver: read the inclusive range bounds from stdin.
    p = int(input().strip())
    q = int(input().strip())
    kaprekarNumbers(p, q)
| [
"tonysj@snu.ac.kr"
] | tonysj@snu.ac.kr |
9ca5ac9b0309eeb6b7ae197443b0c2be0b04ea69 | 59ac1d0f09ebfb527701031f3ab2cfbfb8055f51 | /soapsales/customers/signals.py | fc93f81a6adc77f22799cb456aa27326ae4c6f21 | [] | no_license | DUMBALINYOLO/erpmanu | d4eb61b66cfa3704bd514b58580bdfec5639e3b0 | db979bafcc7481f60af467d1f48d0a81bbbfc1aa | refs/heads/master | 2023-04-28T13:07:45.593051 | 2021-05-12T09:30:23 | 2021-05-12T09:30:23 | 288,446,097 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
import uuid
from django.db import transaction
from customers.models import Customer
@receiver(post_save, sender=Customer)
def post_save_create_customer_number_and_customer_number(sender, instance, created, **kwargs):
    # On first save of a Customer, create its ledger account; on any save
    # where the customer_number is still empty, assign a random 20-char id.
    if created:
        instance.create_customer_account()
    if instance.customer_number == '':
        # instance.save() re-triggers this signal, but the now-non-empty
        # customer_number makes the second pass a no-op (no infinite loop).
        instance.customer_number = str(uuid.uuid4()).replace("-", '').upper()[:20]
        instance.save()
| [
"baridzimaximillem@gmail.com"
] | baridzimaximillem@gmail.com |
a5965fc467bddd784c84dadd78df68bd455047a5 | d345846f520bf43bc2e1b905bb1c141e6b0c7250 | /clone_instagram_api/users/migrations/0005_auto_20180423_1748.py | b894697754d025d6b63eebdbb89e622f66822272 | [
"MIT"
] | permissive | NaiveMonkey/Clone-Instagram-API | edff95332e12829fc7b472a26516b2e15fbad8d3 | c0f17bfb8bf3bb0107d3fcebacd00010db853c23 | refs/heads/master | 2021-09-12T04:02:16.675445 | 2021-07-15T14:21:40 | 2021-07-15T14:21:40 | 129,679,798 | 1 | 0 | MIT | 2021-09-10T18:04:47 | 2018-04-16T03:31:19 | Python | UTF-8 | Python | false | false | 704 | py | # Generated by Django 2.0.4 on 2018-04-23 08:48
from django.conf import settings
from django.db import migrations, models
# Auto-generated migration: relax the follower/following M2M fields so they
# may be left blank.
class Migration(migrations.Migration):

    dependencies = [
        ('users', '0004_user_profile_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='followers',
            field=models.ManyToManyField(blank=True, related_name='_user_followers_+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='user',
            name='following',
            # The '+' suffix in related_name suppresses the reverse accessor.
            field=models.ManyToManyField(blank=True, related_name='_user_following_+', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"ksmr1102@gmail.com"
] | ksmr1102@gmail.com |
8262a9ad72ac1f8b9b92c6d3dab636e47655415c | 7e38634a9afb227ac6f18bbf3cb35cfef27f04b6 | /wiki.py | 93d64c4a05ce9dbc637c9774934214035bfa1935 | [] | no_license | AlexLisii/wiki_questions_game | 858417b6ac0b0c8c1de199ebb46f4e66d1d82a9b | f806e160148f50c49bff47257958bd69b467785c | refs/heads/master | 2020-04-04T19:27:18.677088 | 2014-07-31T23:32:36 | 2014-07-31T23:32:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,527 | py | #!/usr/bin/env python
import requests
import re
import argparse
import sys
def parse_args():
    """Parse the command line; expects one positional argument, the secret
    word whose Wikipedia article will be turned into a question."""
    parser = argparse.ArgumentParser()
    parser.add_argument("secret")
    return parser.parse_args()
def call(url):
    """GET *url* expecting a JSON body; exit with a message on non-200.

    BUG FIX: ``sys.exit`` accepts at most one argument; the original passed
    three, which raised TypeError instead of exiting with the message.
    """
    headers = {"Content-type": "application/json"}
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        sys.exit('Status: %s There was a problem with the request' % response.status_code)
    return response.json()
def get_text():
secret = parse_args().secret
url = "http://en.wikipedia.org/w/api.php?action=parse&page=%s&format=json&prop=text§ion=0" % secret
response = call(url)
# get only text from article
response = response["parse"]["text"]["*"]
# get article first paragraph
match = re.search(r'<p>(.+)</p>', response)
# check if article exists
if match:
question = re.sub(r'<[^>]*>', '', match.group(0))
#print question
else:
sys.exit('The article for %s not found. Try another word.' % secret)
# replace the secret word with ???
question = re.sub(ur'(?i)%r[A-Za-z]{0,3}[\s\"\.,:;!?\']' % secret[:-1], '???' + ' ', question, re.UNICODE)
# remove text in square brackets, i.e. links numbers
question = re.sub(ur'\[[A-Za-z0-9 ]*\]', '', question, re.UNICODE)
# remove text in parentheses, i.e. pronunciation or latin names for subjects
question = re.sub(ur'\s\(([^\)]+)[\)]+', '', question, re.UNICODE)
#question = re.sub(r'\s+', ' ', question)
#print question
return question.encode('utf-8')
def get_images():
    """Return direct URLs for the article images whose file title contains
    the secret word (case-insensitive).  Python 2: ``dict.values()[0]``."""
    secret = parse_args().secret
    url = "http://en.wikipedia.org/w/api.php?action=query&titles=%s&prop=images&format=json" % secret
    response = call(url)
    images = response["query"]["pages"].values()[0]["images"]
    # Keep only images that look like they depict the secret subject.
    image_titles = [
        image["title"]
        for image in images
        if secret.lower() in image["title"].lower()
    ]
    image_urls = []
    for title in image_titles:
        info_url = "http://en.wikipedia.org/w/api.php?action=query&titles=%s&prop=imageinfo&iiprop=url&format=json" % title
        image_urls.append(call(info_url)["query"]["pages"].values()[0]["imageinfo"][0]["url"])
    return image_urls
if __name__ == "__main__":
    # Render the masked paragraph plus the matching article images as a
    # minimal HTML page.
    with open("wiki.html", "w") as f:
        f.write("<p>")
        f.write(get_text())
        f.write("</p>")
        for i in get_images():
            f.write('<img src="%s" width="320">' % i)
| [
"aleksandr@pistoncloud.com"
] | aleksandr@pistoncloud.com |
822896e26b43d3434fdf04b422731eea84253a0a | 1f7d41218ad07fb681d3e470f9bef8bf45b36db0 | /mmskeleton/__init__.py | 18d198de0b44c44f7be3a8398d00819d07513e18 | [] | no_license | 8secz-johndpope/temporal_inverse_kinematics | 138070473f16a42ed9f61f98a3a42f5778bd3c17 | 338367acca2bb468c54b6b08cbc9f3c4cd0f0c49 | refs/heads/master | 2022-12-04T13:51:13.117086 | 2020-08-29T13:05:55 | 2020-08-29T13:05:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from . import utils
from . import datasets, processor, models, ops, apis
from .datasets.skeleton import skeleton_process
| [
"khanh@move.ai"
] | khanh@move.ai |
9265c9b59e49fe1f33cc7f34c6f9a7824f93a756 | 93ee07a1e427af25dffa5964c5495e7943124dd2 | /environment/chasingEnv/multiAgentEnvWithIndividReward.py | f101ecc4d83d2b180531c775d12e7ef21a00f7ec | [] | no_license | Frostwoods/ChasingWithRopeMujocoEnv | 62cb6c19c6f8885a75d6b4f4da8bac96411f33d1 | e3cc356c687438e37ed39ea4b159607c7120e7f0 | refs/heads/main | 2023-03-09T09:35:18.493078 | 2021-02-28T12:52:03 | 2021-02-28T12:52:03 | 343,102,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py |
class RewardWolfIndividual:
    """Per-wolf reward signal for the multi-agent chasing environment.

    Each wolf independently earns ``collisionReward`` for every sheep it
    collides with in the next state; the result is one reward per wolf.
    """

    def __init__(self, wolvesID, sheepsID, entitiesSizeList, isCollision, collisionReward=10):
        self.wolvesID = wolvesID
        self.sheepsID = sheepsID
        self.entitiesSizeList = entitiesSizeList
        self.isCollision = isCollision
        self.collisionReward = collisionReward

    def __call__(self, state, action, nextState):
        """Return a list of rewards, one per wolf (state/action are unused;
        only the post-transition positions matter)."""
        rewards = []
        for wolfID in self.wolvesID:
            wolfState = nextState[wolfID]
            wolfSize = self.entitiesSizeList[wolfID]
            hit_count = sum(
                1
                for sheepID in self.sheepsID
                if self.isCollision(
                    wolfState, nextState[sheepID], wolfSize, self.entitiesSizeList[sheepID]
                )
            )
            rewards.append(hit_count * self.collisionReward)
        return rewards
"frostwoods@foxmail.com"
] | frostwoods@foxmail.com |
a098ada26a3eadfefcb12e2b1491533b9979db93 | 49e72df481bec1501202d7411a55b765c33355ba | /luminar project/functional_programming/list_comprension.py | c1b8cb686c667cb27c47cdcc0d73f8fa7a8b1deb | [] | no_license | JEENUMINI/pythonpgmsupdated | ae6d62bc58e1d44ba81a21637335140119c76869 | 4816ec24693034af36d4b76887d34c9a499f4cc8 | refs/heads/main | 2023-01-23T13:36:28.478938 | 2020-12-15T18:18:07 | 2020-12-15T18:18:07 | 321,749,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | lst=[1,2,3,4,5,6]
#squares
squares=[i*i for i in lst]
print(squares)
square2=[i**2 for i in lst]
print(square2)
#fetch even no from list
even=[i for i in lst if i%2==0]
print(even)
#list placement question
# task=[i+1 if i>5 else i-1 for i in lst]
# print(task)
task=[i+1 if i>5 else (i-1 if i<5 else i) for i in lst]
print(task) | [
"mini13.1994@gmail.com"
] | mini13.1994@gmail.com |
ed90a8b72de29d7b10b3c172b2e9a82442d7c296 | 7b1d3d0fdc5a35680069dec7575f07b28eca27ae | /pandas/io/parsers/c_parser_wrapper.py | a6647df94796149459edf0e6284eefd221393832 | [
"BSD-3-Clause"
] | permissive | KalyanGokhale/pandas | 0efa05525f153c8bbd7c199d9b247306bea9f1d9 | 829444a28573399ecaeeefe7255df3a95b285b86 | refs/heads/master | 2023-05-01T14:04:09.950849 | 2023-04-24T18:49:47 | 2023-04-24T18:49:47 | 132,631,477 | 0 | 0 | BSD-3-Clause | 2018-10-01T17:09:07 | 2018-05-08T15:48:58 | Python | UTF-8 | Python | false | false | 14,578 | py | from __future__ import annotations
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Hashable,
Mapping,
Sequence,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
parsers,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import DtypeWarning
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import pandas_dtype
from pandas.core.dtypes.concat import (
concat_compat,
union_categoricals,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.api import ensure_index_from_sequences
from pandas.io.common import (
dedup_names,
is_potential_multi_index,
)
from pandas.io.parsers.base_parser import (
ParserBase,
ParserError,
is_index_col,
)
if TYPE_CHECKING:
from pandas._typing import (
ArrayLike,
DtypeArg,
DtypeObj,
ReadCsvBuffer,
)
from pandas import (
Index,
MultiIndex,
)
class CParserWrapper(ParserBase):
low_memory: bool
_reader: parsers.TextReader
    def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None:
        """
        Wrap the C-engine ``TextReader``: normalize keyword options, build
        the reader, then reconcile header/names/usecols/index_col state.

        Parameters
        ----------
        src : ReadCsvBuffer[str]
            Open, readable handle positioned at the start of the data.
        **kwds
            Parser options as assembled by ``read_csv``/``read_table``.
        """
        super().__init__(kwds)
        self.kwds = kwds
        # Work on a copy so option-popping below doesn't mutate self.kwds.
        kwds = kwds.copy()

        self.low_memory = kwds.pop("low_memory", False)

        # #2442
        # error: Cannot determine type of 'index_col'
        kwds["allow_leading_cols"] = (
            self.index_col is not False  # type: ignore[has-type]
        )

        # GH20529, validate usecol arg before TextReader
        kwds["usecols"] = self.usecols

        # Have to pass int, would break tests using TextReader directly otherwise :(
        kwds["on_bad_lines"] = self.on_bad_lines.value

        # These options are handled at the Python I/O layer, not by the
        # C reader, so they must not be forwarded.
        for key in (
            "storage_options",
            "encoding",
            "memory_map",
            "compression",
        ):
            kwds.pop(key, None)

        kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))
        if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default:
            kwds["dtype_backend"] = "numpy"
        if kwds["dtype_backend"] == "pyarrow":
            # Fail here loudly instead of in cython after reading
            import_optional_dependency("pyarrow")
        self._reader = parsers.TextReader(src, **kwds)

        self.unnamed_cols = self._reader.unnamed_cols

        # error: Cannot determine type of 'names'
        passed_names = self.names is None  # type: ignore[has-type]

        if self._reader.header is None:
            self.names = None
        else:
            # error: Cannot determine type of 'names'
            # error: Cannot determine type of 'index_names'
            (
                self.names,  # type: ignore[has-type]
                self.index_names,
                self.col_names,
                passed_names,
            ) = self._extract_multi_indexer_columns(
                self._reader.header,
                self.index_names,  # type: ignore[has-type]
                passed_names,
            )

        # error: Cannot determine type of 'names'
        if self.names is None:  # type: ignore[has-type]
            # Headerless file without user-supplied names: fall back to
            # positional column labels 0..table_width-1.
            self.names = list(range(self._reader.table_width))

        # gh-9755
        #
        # need to set orig_names here first
        # so that proper indexing can be done
        # with _set_noconvert_columns
        #
        # once names has been filtered, we will
        # then set orig_names again to names
        # error: Cannot determine type of 'names'
        self.orig_names = self.names[:]  # type: ignore[has-type]

        if self.usecols:
            usecols = self._evaluate_usecols(self.usecols, self.orig_names)

            # GH 14671
            # assert for mypy, orig_names is List or None, None would error in issubset
            assert self.orig_names is not None
            if self.usecols_dtype == "string" and not set(usecols).issubset(
                self.orig_names
            ):
                self._validate_usecols_names(usecols, self.orig_names)

            # error: Cannot determine type of 'names'
            if len(self.names) > len(usecols):  # type: ignore[has-type]
                # error: Cannot determine type of 'names'
                self.names = [  # type: ignore[has-type]
                    n
                    # error: Cannot determine type of 'names'
                    for i, n in enumerate(self.names)  # type: ignore[has-type]
                    if (i in usecols or n in usecols)
                ]

            # error: Cannot determine type of 'names'
            if len(self.names) < len(usecols):  # type: ignore[has-type]
                # error: Cannot determine type of 'names'
                self._validate_usecols_names(
                    usecols,
                    self.names,  # type: ignore[has-type]
                )

        # error: Cannot determine type of 'names'
        self._validate_parse_dates_presence(self.names)  # type: ignore[has-type]
        self._set_noconvert_columns()

        # error: Cannot determine type of 'names'
        self.orig_names = self.names  # type: ignore[has-type]

        if not self._has_complex_date_col:
            # error: Cannot determine type of 'index_col'
            if self._reader.leading_cols == 0 and is_index_col(
                self.index_col  # type: ignore[has-type]
            ):
                self._name_processed = True
                (
                    index_names,
                    # error: Cannot determine type of 'names'
                    self.names,  # type: ignore[has-type]
                    self.index_col,
                ) = self._clean_index_names(
                    # error: Cannot determine type of 'names'
                    self.names,  # type: ignore[has-type]
                    # error: Cannot determine type of 'index_col'
                    self.index_col,  # type: ignore[has-type]
                )

            if self.index_names is None:
                self.index_names = index_names

            if self._reader.header is None and not passed_names:
                assert self.index_names is not None
                self.index_names = [None] * len(self.index_names)

        # Leading (unnamed) columns from the C reader imply an implicit index.
        self._implicit_index = self._reader.leading_cols > 0
def close(self) -> None:
    """Release any file handles held by the underlying C parser.

    Safe to call more than once: a ``ValueError`` raised by an
    already-closed reader is deliberately ignored.
    """
    try:
        self._reader.close()
    except ValueError:
        # reader was already closed; nothing left to release
        pass
def _set_noconvert_columns(self) -> None:
    """
    Register columns that must skip dtype conversion with the C reader.

    Currently, any column that is involved with date parsing will not
    undergo such conversions.
    """
    assert self.orig_names is not None
    # Build a name -> position lookup once; repeated orig_names.index(x)
    # calls would be quadratic (xref GH#44106).
    position_of = {name: idx for idx, name in enumerate(self.orig_names)}
    col_indices = [position_of[name] for name in self.names]  # type: ignore[has-type]
    skip_columns = self._set_noconvert_dtype_columns(
        col_indices,
        self.names,  # type: ignore[has-type]
    )
    for column in skip_columns:
        self._reader.set_noconvert(column)
def read(
    self,
    nrows: int | None = None,
) -> tuple[
    Index | MultiIndex | None,
    Sequence[Hashable] | MultiIndex,
    Mapping[Hashable, ArrayLike],
]:
    """Read up to ``nrows`` rows through the C reader and post-process them.

    Returns a ``(index, column_names, data)`` triple.  When the reader is
    already exhausted (``StopIteration``): the first call returns empty
    metadata; later calls close the reader and re-raise so iteration stops.
    """
    index: Index | MultiIndex | None
    column_names: Sequence[Hashable] | MultiIndex
    try:
        if self.low_memory:
            chunks = self._reader.read_low_memory(nrows)
            # destructive to chunks
            data = _concatenate_chunks(chunks)
        else:
            data = self._reader.read(nrows)
    except StopIteration:
        if self._first_chunk:
            self._first_chunk = False
            names = dedup_names(
                self.orig_names,
                is_potential_multi_index(self.orig_names, self.index_col),
            )
            index, columns, col_dict = self._get_empty_meta(
                names,
                self.index_col,
                self.index_names,
                dtype=self.kwds.get("dtype"),
            )
            columns = self._maybe_make_multi_index_columns(columns, self.col_names)
            if self.usecols is not None:
                columns = self._filter_usecols(columns)
            col_dict = {k: v for k, v in col_dict.items() if k in columns}
            return index, columns, col_dict
        else:
            self.close()
            raise

    # Done with first read, next time raise StopIteration
    self._first_chunk = False

    # error: Cannot determine type of 'names'
    names = self.names  # type: ignore[has-type]

    if self._reader.leading_cols:
        # The reader produced implicit leading index columns.
        if self._has_complex_date_col:
            raise NotImplementedError("file structure not yet supported")

        # implicit index, no index names
        arrays = []

        if self.index_col and self._reader.leading_cols != len(self.index_col):
            raise ParserError(
                "Could not construct index. Requested to use "
                f"{len(self.index_col)} number of columns, but "
                f"{self._reader.leading_cols} left to parse."
            )

        # pop the index columns out of `data` (destructive) and parse dates
        for i in range(self._reader.leading_cols):
            if self.index_col is None:
                values = data.pop(i)
            else:
                values = data.pop(self.index_col[i])

            values = self._maybe_parse_dates(values, i, try_parse_dates=True)
            arrays.append(values)

        index = ensure_index_from_sequences(arrays)

        if self.usecols is not None:
            names = self._filter_usecols(names)

        names = dedup_names(names, is_potential_multi_index(names, self.index_col))

        # rename dict keys: map positional keys to (deduplicated) names
        data_tups = sorted(data.items())
        data = {k: v for k, (i, v) in zip(names, data_tups)}

        column_names, date_data = self._do_date_conversions(names, data)

        # maybe create a mi on the columns
        column_names = self._maybe_make_multi_index_columns(
            column_names, self.col_names
        )
    else:
        # rename dict keys
        data_tups = sorted(data.items())

        # ugh, mutation

        # assert for mypy, orig_names is List or None, None would error in list(...)
        assert self.orig_names is not None
        names = list(self.orig_names)
        names = dedup_names(names, is_potential_multi_index(names, self.index_col))

        if self.usecols is not None:
            names = self._filter_usecols(names)

        # columns as list
        alldata = [x[1] for x in data_tups]
        if self.usecols is None:
            self._check_data_length(names, alldata)

        data = {k: v for k, (i, v) in zip(names, data_tups)}

        names, date_data = self._do_date_conversions(names, data)
        index, column_names = self._make_index(date_data, alldata, names)

    return index, column_names, date_data
def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]:
    """Restrict ``names`` to the columns selected by ``self.usecols``.

    A column survives when either its positional index or its label is in
    the evaluated usecols collection; when nothing needs filtering the
    input is returned unchanged.
    """
    # hackish: usecols may contain positions, labels, or a mix of both
    selected = self._evaluate_usecols(self.usecols, names)
    if selected is None or len(selected) == len(names):
        return names
    return [
        name
        for position, name in enumerate(names)
        if position in selected or name in selected
    ]
def _get_index_names(self):
    """Return ``(column_names, index_names)`` derived from the parsed header.

    When the reader produced no implicit leading index columns but an
    explicit ``index_col`` was requested, the index labels are split out
    of the header names via ``_clean_index_names``.
    """
    column_names = list(self._reader.header[0])
    index_names = None

    explicit_index = self._reader.leading_cols == 0 and self.index_col is not None
    if explicit_index:
        index_names, column_names, self.index_col = self._clean_index_names(
            column_names, self.index_col
        )

    return column_names, index_names
def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool = True):
    """Run date conversion on ``values`` when column ``index`` is date-parsed.

    ``values`` is returned untouched unless ``try_parse_dates`` is set and
    the column at ``index`` was configured for date parsing.
    """
    if not (try_parse_dates and self._should_parse_dates(index)):
        return values
    col_name = None if self.index_names is None else self.index_names[index]
    return self._date_conv(values, col=col_name)
def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
    """
    Concatenate chunks of data read with low_memory=True.

    The tricky part is handling Categoricals, where different chunks
    may have different inferred categories.

    Note: destructive — each column is popped out of every chunk as it is
    consumed, so the input dicts are emptied.
    """
    result: dict = {}
    mixed_type_columns = []
    for name in list(chunks[0].keys()):
        pieces = [chunk.pop(name) for chunk in chunks]

        # Check each piece for consistent types.
        seen_dtypes = {piece.dtype for piece in pieces}
        non_categorical = {
            dt for dt in seen_dtypes if not isinstance(dt, CategoricalDtype)
        }

        if isinstance(seen_dtypes.pop(), CategoricalDtype):
            result[name] = union_categoricals(pieces, sort_categories=False)
        else:
            result[name] = concat_compat(pieces)
            if len(non_categorical) > 1 and result[name].dtype == np.dtype(object):
                mixed_type_columns.append(str(name))

    if mixed_type_columns:
        warning_message = (
            f"Columns ({','.join(mixed_type_columns)}) have mixed types. "
            f"Specify dtype option on import or set low_memory=False."
        )
        warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level())
    return result
def ensure_dtype_objs(
    dtype: DtypeArg | dict[Hashable, DtypeArg] | None
) -> DtypeObj | dict[Hashable, DtypeObj] | None:
    """
    Ensure we have either None, a dtype object, or a dictionary mapping to
    dtype objects.
    """
    if isinstance(dtype, defaultdict):
        # Preserve defaultdict semantics: unknown keys fall back to the
        # converted default dtype. "None" not callable [misc]
        default_dtype = pandas_dtype(dtype.default_factory())  # type: ignore[misc]
        converted: defaultdict = defaultdict(lambda: default_dtype)
        converted.update({key: pandas_dtype(val) for key, val in dtype.items()})
        return converted
    if isinstance(dtype, dict):
        return {key: pandas_dtype(val) for key, val in dtype.items()}
    if dtype is None:
        return None
    return pandas_dtype(dtype)
| [
"noreply@github.com"
] | noreply@github.com |
33013989259884ab0ed306b1a8ffd64725df92f6 | 7c009d77bc0124b69abdd5bbf4d00ee00a6de881 | /process/migrations/0020_auto_20210606_1321.py | 23a2cb9944ae79b25e63e50e2bb315ad1da36180 | [] | no_license | Rajeshwari33/POProcess | 85598b3bb78c1bcc3bea583fcd106fd32eb97c99 | dde399029b01554f97988709688e14193a96cb1a | refs/heads/master | 2023-05-25T18:33:45.589819 | 2021-06-15T16:27:37 | 2021-06-15T16:27:37 | 367,557,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | # Generated by Django 3.2 on 2021-06-06 07:51
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add audit fields (created_by, created_date, is_active) to MailCredentials."""

    # Must be applied after the migration that introduced MailCredentials.
    dependencies = [
        ('process', '0019_mailcredentials'),
    ]

    operations = [
        # Optional numeric reference to the user that created the record.
        migrations.AddField(
            model_name='mailcredentials',
            name='created_by',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='User Id'),
        ),
        # Creation timestamp; existing rows are backfilled with "now".
        migrations.AddField(
            model_name='mailcredentials',
            name='created_date',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created Date'),
        ),
        # Enable/disable flag; all existing rows become active.
        migrations.AddField(
            model_name='mailcredentials',
            name='is_active',
            field=models.BooleanField(default=True, verbose_name='Active ?'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
fc931823e2e0c5dadbbef45f1c7f9f23c9c60607 | c52f407e12599b5b850b09c295b6254e07225e81 | /altair/vegalite/v4/api.py | f99f605f3c0e4110bade8d4f99c1ce45cc236e77 | [
"BSD-3-Clause"
] | permissive | chris1610/altair | 35fa090f11b39a88f9789f86bd68107924d8d84a | 9662da9f702c441c8acec4bbb9d71a22475638c2 | refs/heads/master | 2020-09-27T18:48:06.470667 | 2019-12-08T19:44:29 | 2019-12-08T19:44:29 | 226,583,447 | 3 | 1 | BSD-3-Clause | 2019-12-07T22:22:08 | 2019-12-07T22:22:07 | null | UTF-8 | Python | false | false | 84,058 | py | # -*- coding: utf-8 -*-
import warnings
import hashlib
import io
import json
import jsonschema
import pandas as pd
from .schema import core, channels, mixins, Undefined, SCHEMA_URL
from .data import data_transformers, pipe
from ... import utils, expr
from .display import renderers, VEGALITE_VERSION, VEGAEMBED_VERSION, VEGA_VERSION
from .theme import themes
# ------------------------------------------------------------------------
# Data Utilities
def _dataset_name(values):
    """Generate a unique hash of the data

    Parameters
    ----------
    values : list or dict
        A list/dict representation of data values.

    Returns
    -------
    name : string
        A unique name generated from the hash of the values.
    """
    if isinstance(values, core.InlineDataset):
        values = values.to_dict()
    # sort_keys makes the serialization (and hence the name) deterministic
    serialized = json.dumps(values, sort_keys=True)
    digest = hashlib.md5(serialized.encode()).hexdigest()
    return 'data-' + digest
def _consolidate_data(data, context):
    """If data is specified inline, then move it to context['datasets']

    This function will modify context in-place, and return a new version of data
    """
    values = Undefined
    kwds = {}

    if isinstance(data, core.InlineData):
        # anonymous InlineData carrying actual values is eligible
        if data.name is Undefined and data.values is not Undefined:
            values = data.values
            kwds = {'format': data.format}
    elif isinstance(data, dict):
        # dict-specified inline data without an explicit name
        if 'name' not in data and 'values' in data:
            values = data['values']
            kwds = {key: val for key, val in data.items() if key != 'values'}

    if values is Undefined:
        # nothing to consolidate; return the input unchanged
        return data

    name = _dataset_name(values)
    consolidated = core.NamedData(name=name, **kwds)
    context.setdefault('datasets', {})[name] = values
    return consolidated
def _prepare_data(data, context=None):
    """Convert input data to data for use within schema

    Parameters
    ----------
    data :
        The input dataset in the form of a DataFrame, dictionary, altair data
        object, or other type that is recognized by the data transformers.
    context : dict (optional)
        The to_dict context in which the data is being prepared. This is used
        to keep track of information that needs to be passed up and down the
        recursive serialization routine, such as global named datasets.
    """
    if data is Undefined:
        return data

    # convert dataframes or objects with __geo_interface__ to dict
    if isinstance(data, pd.DataFrame) or hasattr(data, '__geo_interface__'):
        data = pipe(data, data_transformers.get())

    # convert string input to a URLData
    if isinstance(data, str):
        data = core.UrlData(data)

    # consolidate inline data to top-level datasets
    if context is not None and data_transformers.consolidate_datasets:
        data = _consolidate_data(data, context)

    # if data is still not a recognized type, warn but return it unchanged
    if not isinstance(data, (dict, core.Data)):
        warnings.warn("data of type {} not recognized".format(type(data)))

    return data
# ------------------------------------------------------------------------
# Aliases & specializations

# Public alias: ``alt.Bin`` is shorthand for the BinParams schema wrapper.
Bin = core.BinParams
@utils.use_signature(core.LookupData)
class LookupData(core.LookupData):
    def to_dict(self, *args, **kwargs):
        """Convert the chart to a dictionary suitable for JSON export"""
        # Work on a shallow copy so the original object's data reference is
        # untouched, then normalize the data before serializing.
        shallow = self.copy(deep=False)
        shallow.data = _prepare_data(shallow.data, kwargs.get('context'))
        return super(LookupData, shallow).to_dict(*args, **kwargs)
@utils.use_signature(core.FacetMapping)
class FacetMapping(core.FacetMapping):
    # Shorthand strings in row/column are only expanded at to_dict time,
    # so instances may be temporarily invalid against the schema.
    _class_is_valid_at_instantiation = False

    def to_dict(self, *args, **kwargs):
        """Serialize, expanding shorthand strings in ``row``/``column``."""
        shallow = self.copy(deep=False)
        data = kwargs.get('context', {}).get('data', None)
        for channel in ('row', 'column'):
            spec = getattr(self, channel)
            if isinstance(spec, str):
                parsed = utils.parse_shorthand(spec, data)
                setattr(shallow, channel, core.FacetFieldDef(**parsed))
        return super(FacetMapping, shallow).to_dict(*args, **kwargs)
# ------------------------------------------------------------------------
# Encoding will contain channel objects that aren't valid at instantiation
core.FacetedEncoding._class_is_valid_at_instantiation = False

# ------------------------------------------------------------------------
# These are parameters that are valid at the top level, but are not valid
# for specs that are within a composite chart
# (layer, hconcat, vconcat, facet, repeat)
# NOTE(review): usage is not visible in this chunk — presumably consulted
# when hoisting/stripping keys from nested specs; verify against callers.
TOPLEVEL_ONLY_KEYS = {'background', 'config', 'autosize', 'padding', '$schema'}
def _get_channels_mapping():
    """Map channel wrapper classes to their lowercase channel names.

    e.g. ``channels.ColorValue`` -> ``"color"``.
    """
    mapping = {}
    for attr_name in dir(channels):
        candidate = getattr(channels, attr_name)
        is_channel = isinstance(candidate, type) and issubclass(candidate, core.SchemaBase)
        if is_channel:
            mapping[candidate] = attr_name.replace('Value', '').lower()
    return mapping
# -------------------------------------------------------------------------
# Tools for working with selections
class Selection(object):
    """A Selection object"""

    # class-wide counter used to auto-generate unique selection names
    _counter = 0

    @classmethod
    def _get_name(cls):
        cls._counter += 1
        return "selector{:03d}".format(cls._counter)

    def __init__(self, name, selection):
        if name is None:
            name = self._get_name()
        self.name = name
        self.selection = selection

    def __repr__(self):
        return "Selection({0!r}, {1})".format(self.name, self.selection)

    def ref(self):
        return {'selection': self.name}

    def to_dict(self):
        return {'selection': self.name}

    def __invert__(self):
        return Selection(core.SelectionNot(**{'not': self.name}), self.selection)

    def __and__(self, other):
        # combining with another Selection uses its name; anything else is
        # passed through as-is (e.g. a raw name string)
        other_name = other.name if isinstance(other, Selection) else other
        return Selection(
            core.SelectionAnd(**{'and': [self.name, other_name]}), self.selection
        )

    def __or__(self, other):
        other_name = other.name if isinstance(other, Selection) else other
        return Selection(
            core.SelectionOr(**{'or': [self.name, other_name]}), self.selection
        )

    def __getattr__(self, field_name):
        # attribute access builds an expression referencing the selection
        return expr.core.GetAttrExpression(self.name, field_name)

    def __getitem__(self, field_name):
        return expr.core.GetItemExpression(self.name, field_name)
# ------------------------------------------------------------------------
# Top-Level Functions
def value(value, **kwargs):
    """Specify a value for use in an encoding.

    Returns a dict of the form ``{'value': value, **kwargs}``.
    """
    return dict(**{'value': value}, **kwargs)
def selection(name=None, type=Undefined, **kwds):
    """Create a named selection.

    Parameters
    ----------
    name : string (optional)
        The name of the selection. If not specified, a unique name will be
        created.
    type : string
        The type of the selection: one of ["interval", "single", or "multi"]
    **kwds :
        additional keywords will be used to construct a SelectionDef instance
        that controls the selection.

    Returns
    -------
    selection: Selection
        The selection object that can be used in chart creation.
    """
    definition = core.SelectionDef(type=type, **kwds)
    return Selection(name, definition)
# Shorthand constructors for the selection and binding schema objects.

@utils.use_signature(core.IntervalSelection)
def selection_interval(**kwargs):
    """Create a selection with type='interval'"""
    return selection(type='interval', **kwargs)


@utils.use_signature(core.MultiSelection)
def selection_multi(**kwargs):
    """Create a selection with type='multi'"""
    return selection(type='multi', **kwargs)


@utils.use_signature(core.SingleSelection)
def selection_single(**kwargs):
    """Create a selection with type='single'"""
    return selection(type='single', **kwargs)


@utils.use_signature(core.Binding)
def binding(input, **kwargs):
    """A generic binding"""
    return core.Binding(input=input, **kwargs)


@utils.use_signature(core.BindCheckbox)
def binding_checkbox(**kwargs):
    """A checkbox binding"""
    return core.BindCheckbox(input='checkbox', **kwargs)


@utils.use_signature(core.BindRadioSelect)
def binding_radio(**kwargs):
    """A radio button binding"""
    return core.BindRadioSelect(input='radio', **kwargs)


@utils.use_signature(core.BindRadioSelect)
def binding_select(**kwargs):
    """A select binding"""
    return core.BindRadioSelect(input='select', **kwargs)


@utils.use_signature(core.BindRange)
def binding_range(**kwargs):
    """A range binding"""
    return core.BindRange(input='range', **kwargs)
def condition(predicate, if_true, if_false, **kwargs):
    """A conditional attribute or encoding

    Parameters
    ----------
    predicate: Selection, LogicalOperandPredicate, expr.Expression, dict, or string
        the selection predicate or test predicate for the condition.
        if a string is passed, it will be treated as a test operand.
    if_true:
        the spec or object to use if the selection predicate is true
    if_false:
        the spec or object to use if the selection predicate is false
    **kwargs:
        additional keyword args are added to the resulting dict

    Returns
    -------
    spec: dict or VegaLiteSchema
        the spec that describes the condition
    """
    test_predicates = (str, expr.Expression, core.LogicalOperandPredicate)

    # normalize the predicate into a condition dict
    if isinstance(predicate, Selection):
        condition = {'selection': predicate.name}
    elif isinstance(predicate, core.SelectionOperand):
        condition = {'selection': predicate}
    elif isinstance(predicate, test_predicates):
        condition = {'test': predicate}
    elif isinstance(predicate, dict):
        # BUG FIX: copy so the caller-supplied predicate dict is not
        # mutated by the update() calls below.
        condition = dict(predicate)
    else:
        raise NotImplementedError("condition predicate of type {}"
                                  "".format(type(predicate)))

    if isinstance(if_true, core.SchemaBase):
        # convert to dict for now; the from_dict call below will wrap this
        # dict in the appropriate schema
        if_true = if_true.to_dict()
    elif isinstance(if_true, str):
        if_true = {'shorthand': if_true}
    else:
        # BUG FIX: copy so a caller-supplied if_true dict is not mutated
        # in place when kwargs are merged in.
        if_true = dict(if_true)
    if_true.update(kwargs)
    condition.update(if_true)

    if isinstance(if_false, core.SchemaBase):
        # For the selection, the channel definitions all allow selections
        # already. So use this SchemaBase wrapper if possible.
        selection = if_false.copy()
        selection.condition = condition
    elif isinstance(if_false, str):
        selection = {'condition': condition, 'shorthand': if_false}
        selection.update(kwargs)
    else:
        selection = dict(condition=condition, **if_false)

    return selection
# --------------------------------------------------------------------
# Top-level objects

class TopLevelMixin(mixins.ConfigMethodMixin):
    """Mixin for top-level chart objects such as Chart, LayeredChart, etc."""

    # Top-level specs are built incrementally, so schema validation is
    # deferred to serialization rather than enforced at instantiation.
    _class_is_valid_at_instantiation = False
def to_dict(self, *args, **kwargs):
    """Convert the chart to a dictionary suitable for JSON export.

    Extra positional/keyword arguments are forwarded to
    ``SchemaBase.to_dict`` (notably ``validate`` and ``context``).
    On validation failure the conversion is retried with
    ``validate='deep'`` so the raised error carries a useful traceback.
    """
    # We make use of three context markers:
    # - 'data' points to the data that should be referenced for column type
    #   inference.
    # - 'top_level' is a boolean flag that is assumed to be true; if it's
    #   true then a "$schema" arg is added to the dict.
    # - 'datasets' is a dict of named datasets that should be inserted
    #   in the top-level object

    # note: not a deep copy because we want datasets and data arguments to
    # be passed by reference
    context = kwargs.get('context', {}).copy()
    context.setdefault('datasets', {})
    is_top_level = context.get('top_level', True)

    copy = self.copy(deep=False)
    original_data = getattr(copy, 'data', Undefined)
    copy.data = _prepare_data(original_data, context)

    if original_data is not Undefined:
        context['data'] = original_data

    # remaining to_dict calls are not at top level
    context['top_level'] = False
    kwargs['context'] = context

    try:
        dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
    except jsonschema.ValidationError:
        dct = None

    # If we hit an error, then re-convert with validate='deep' to get
    # a more useful traceback. We don't do this by default because it's
    # much slower in the case that there are no errors.
    if dct is None:
        kwargs['validate'] = 'deep'
        dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)

    # TODO: following entries are added after validation. Should they be validated?
    if is_top_level:
        # since this is top-level we add $schema if it's missing
        if '$schema' not in dct:
            dct['$schema'] = SCHEMA_URL

        # apply theme from theme registry
        the_theme = themes.get()
        dct = utils.update_nested(the_theme(), dct, copy=True)

        # update datasets
        if context['datasets']:
            dct.setdefault('datasets', {}).update(context['datasets'])

    return dct
def to_html(self, base_url="https://cdn.jsdelivr.net/npm/",
            output_div='vis', embed_options=None, json_kwds=None,
            fullhtml=True, requirejs=False):
    """Render the chart spec as an HTML string via ``utils.spec_to_html``."""
    html_kwargs = dict(
        mode='vega-lite',
        vegalite_version=VEGALITE_VERSION,
        vegaembed_version=VEGAEMBED_VERSION,
        vega_version=VEGA_VERSION,
        base_url=base_url,
        output_div=output_div,
        embed_options=embed_options,
        json_kwds=json_kwds,
        fullhtml=fullhtml,
        requirejs=requirejs,
    )
    return utils.spec_to_html(self.to_dict(), **html_kwargs)
def save(self, fp, format=None, override_data_transformer=True,
         scale_factor=1.0,
         vegalite_version=VEGALITE_VERSION,
         vega_version=VEGA_VERSION,
         vegaembed_version=VEGAEMBED_VERSION,
         **kwargs):
    """Save a chart to file in a variety of formats

    Supported formats are json, html, png, svg

    Parameters
    ----------
    fp : string filename or file-like object
        file in which to write the chart.
    format : string (optional)
        the format to write: one of ['json', 'html', 'png', 'svg'].
        If not specified, the format will be determined from the filename.
    override_data_transformer : boolean (optional)
        If True (default), then the save action will be done with
        the MaxRowsError disabled. If False, then do not change the data
        transformer.
    scale_factor : float
        For svg or png formats, scale the image by this factor when saving.
        This can be used to control the size or resolution of the output.
        Default is 1.0
    vegalite_version : string (optional)
        vega-lite library version to use when saving.
    vega_version : string (optional)
        vega library version to use when saving.
    vegaembed_version : string (optional)
        vega-embed library version to use when saving.
    **kwargs :
        Additional keyword arguments are passed to the output method
        associated with the specified format.

    Returns
    -------
    the value returned by the backend ``altair.utils.save.save`` function.
    """
    from ...utils.save import save

    kwds = dict(chart=self, fp=fp, format=format,
                scale_factor=scale_factor,
                vegalite_version=vegalite_version,
                vega_version=vega_version,
                vegaembed_version=vegaembed_version,
                **kwargs)

    # By default we override the data transformer. This makes it so
    # that save() will succeed even for large datasets that would
    # normally trigger a MaxRowsError
    if override_data_transformer:
        with data_transformers.disable_max_rows():
            result = save(**kwds)
    else:
        result = save(**kwds)
    return result
# Fallback for when rendering fails; the full repr is too long to be
# useful in nearly all cases.
def __repr__(self):
    """Compact placeholder repr: ``alt.<ClassName>(...)``."""
    class_name = type(self).__name__
    return "alt.{}(...)".format(class_name)
# Layering and stacking
def __add__(self, other):
    """Layer charts: ``chart1 + chart2`` delegates to ``layer(...)``."""
    if not isinstance(other, TopLevelMixin):
        raise ValueError("Only Chart objects can be layered.")
    return layer(self, other)

def __and__(self, other):
    """Vertical concatenation: ``chart1 & chart2`` delegates to ``vconcat(...)``."""
    if not isinstance(other, TopLevelMixin):
        raise ValueError("Only Chart objects can be concatenated.")
    return vconcat(self, other)

def __or__(self, other):
    """Horizontal concatenation: ``chart1 | chart2`` delegates to ``hconcat(...)``."""
    if not isinstance(other, TopLevelMixin):
        raise ValueError("Only Chart objects can be concatenated.")
    return hconcat(self, other)
def repeat(self, repeat=Undefined, row=Undefined, column=Undefined, columns=Undefined, **kwargs):
    """Return a RepeatChart built from the chart

    Fields within the chart can be set to correspond to the row or
    column using `alt.repeat('row')` and `alt.repeat('column')`.

    Parameters
    ----------
    repeat : list
        a list of data column names to be repeated. This cannot be
        used along with the ``row`` or ``column`` argument.
    row : list
        a list of data column names to be mapped to the row facet
    column : list
        a list of data column names to be mapped to the column facet
    columns : int
        the maximum number of columns before wrapping. Only referenced
        if ``repeat`` is specified.
    **kwargs :
        additional keywords passed to RepeatChart.

    Returns
    -------
    chart : RepeatChart
        a repeated chart.
    """
    has_repeat_list = repeat is not Undefined
    has_row_or_column = row is not Undefined or column is not Undefined
    if has_repeat_list and has_row_or_column:
        raise ValueError("repeat argument cannot be combined with row/column argument.")
    if not has_repeat_list:
        # fall back to an explicit row/column mapping
        repeat = core.RepeatMapping(row=row, column=column)
    return RepeatChart(spec=self, repeat=repeat, columns=columns, **kwargs)
def properties(self, **kwargs):
    """Set top-level properties of the Chart.

    Argument names and types are the same as class initialization.
    """
    updated = self.copy(deep=False)
    for prop, val in kwargs.items():
        if prop == 'selection' and isinstance(val, Selection):
            # For backward compatibility with old selection interface.
            setattr(updated, prop, {val.name: val.selection})
            continue
        # Don't validate data, because it hasn't been processed.
        if prop != 'data':
            self.validate_property(prop, val)
        setattr(updated, prop, val)
    return updated
def project(self, type='mercator', center=Undefined, clipAngle=Undefined, clipExtent=Undefined,
            coefficient=Undefined, distance=Undefined, fraction=Undefined, lobes=Undefined,
            parallel=Undefined, precision=Undefined, radius=Undefined, ratio=Undefined,
            reflectX=Undefined, reflectY=Undefined, rotate=Undefined, scale=Undefined,
            spacing=Undefined, tilt=Undefined, translate=Undefined, **kwds):
    """Add a geographic projection to the chart.

    This is generally used either with ``mark_geoshape`` or with the
    ``latitude``/``longitude`` encodings.

    Available projection types are
    ['albers', 'albersUsa', 'azimuthalEqualArea', 'azimuthalEquidistant',
    'conicConformal', 'conicEqualArea', 'conicEquidistant', 'equalEarth', 'equirectangular',
    'gnomonic', 'identity', 'mercator', 'orthographic', 'stereographic', 'transverseMercator']

    Attributes
    ----------
    type : ProjectionType
        The cartographic projection to use. This value is case-insensitive, for example
        `"albers"` and `"Albers"` indicate the same projection type. You can find all valid
        projection types [in the
        documentation](https://vega.github.io/vega-lite/docs/projection.html#projection-types).

        **Default value:** `mercator`
    center : List(float)
        Sets the projection's center to the specified center, a two-element array of
        longitude and latitude in degrees.

        **Default value:** `[0, 0]`
    clipAngle : float
        Sets the projection's clipping circle radius to the specified angle in degrees. If
        `null`, switches to [antimeridian](http://bl.ocks.org/mbostock/3788999) cutting
        rather than small-circle clipping.
    clipExtent : List(List(float))
        Sets the projection's viewport clip extent to the specified bounds in pixels. The
        extent bounds are specified as an array `[[x0, y0], [x1, y1]]`, where `x0` is the
        left-side of the viewport, `y0` is the top, `x1` is the right and `y1` is the
        bottom. If `null`, no viewport clipping is performed.
    coefficient : float
    distance : float
    fraction : float
    lobes : float
    parallel : float
    precision : Mapping(required=[length])
        Sets the threshold for the projection's [adaptive
        resampling](http://bl.ocks.org/mbostock/3795544) to the specified value in pixels.
        This value corresponds to the [Douglas-Peucker
        distance](http://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm).
        If precision is not specified, returns the projection's current resampling
        precision which defaults to `sqrt(0.5) ~= 0.70710...`.
    radius : float
    ratio : float
    reflectX : boolean
    reflectY : boolean
    rotate : List(float)
        Sets the projection's three-axis rotation to the specified angles, which must be a
        two- or three-element array of numbers [`lambda`, `phi`, `gamma`] specifying the
        rotation angles in degrees about each spherical axis. (These correspond to yaw,
        pitch and roll.)

        **Default value:** `[0, 0, 0]`
    scale : float
        Sets the projection's scale (zoom) value, overriding automatic fitting.
    spacing : float
    tilt : float
    translate : List(float)
        Sets the projection's translation (pan) value, overriding automatic fitting.

    Returns
    -------
    chart :
        a copy of the chart with the ``projection`` property set.
    """
    projection = core.Projection(center=center, clipAngle=clipAngle, clipExtent=clipExtent,
                                 coefficient=coefficient, distance=distance, fraction=fraction,
                                 lobes=lobes, parallel=parallel, precision=precision,
                                 radius=radius, ratio=ratio, reflectX=reflectX,
                                 reflectY=reflectY, rotate=rotate, scale=scale, spacing=spacing,
                                 tilt=tilt, translate=translate, type=type, **kwds)
    return self.properties(projection=projection)
def _add_transform(self, *transforms):
    """Return a copy of the chart with ``transforms`` appended to its transform list."""
    updated = self.copy(deep=['transform'])
    current = [] if updated.transform is Undefined else updated.transform
    updated.transform = current + list(transforms)
    return updated
def transform_aggregate(self, aggregate=Undefined, groupby=Undefined, **kwds):
    """
    Add an AggregateTransform to the schema.

    Parameters
    ----------
    aggregate : List(:class:`AggregatedFieldDef`)
        Array of objects that define fields to aggregate.
    groupby : List(string)
        The data fields to group by. If not specified, a single group containing all data
        objects will be used.
    **kwds :
        additional keywords are converted to aggregates using standard
        shorthand parsing.

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    Examples
    --------
    The aggregate transform allows you to specify transforms directly using
    the same shorthand syntax as used in encodings:

    >>> import altair as alt
    >>> chart1 = alt.Chart().transform_aggregate(
    ...     mean_acc='mean(Acceleration)',
    ...     groupby=['Origin']
    ... )
    >>> print(chart1.transform[0].to_json())  # doctest: +NORMALIZE_WHITESPACE
    {
      "aggregate": [
        {
          "as": "mean_acc",
          "field": "Acceleration",
          "op": "mean"
        }
      ],
      "groupby": [
        "Origin"
      ]
    }

    It also supports including AggregatedFieldDef instances or dicts directly,
    so you can create the above transform like this:

    >>> chart2 = alt.Chart().transform_aggregate(
    ...     [alt.AggregatedFieldDef(field='Acceleration', op='mean',
    ...                             **{'as': 'mean_acc'})],
    ...     groupby=['Origin']
    ... )
    >>> chart2.transform == chart1.transform
    True

    See Also
    --------
    alt.AggregateTransform : underlying transform object
    """
    if aggregate is Undefined:
        aggregate = []
    for key, val in kwds.items():
        # each keyword like mean_acc='mean(Acceleration)' becomes an
        # AggregatedFieldDef with the keyword name as the output field
        parsed = utils.parse_shorthand(val)
        dct = {'as': key,
               'field': parsed.get('field', Undefined),
               'op': parsed.get('aggregate', Undefined)}
        aggregate.append(core.AggregatedFieldDef(**dct))
    return self._add_transform(core.AggregateTransform(aggregate=aggregate,
                                                       groupby=groupby))
def transform_bin(self, as_=Undefined, field=Undefined, bin=True, **kwargs):
    """
    Add a BinTransform to the schema.

    Parameters
    ----------
    as_ : anyOf(string, List(string))
        The output fields at which to write the start and end bin values.
    bin : anyOf(boolean, :class:`BinParams`)
        An object indicating bin properties, or simply ``true`` for using default bin
        parameters.
    field : string
        The data field to bin.

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    Examples
    --------
    >>> import altair as alt
    >>> chart = alt.Chart().transform_bin("x_binned", "x")
    >>> chart.transform[0]
    BinTransform({
      as: 'x_binned',
      bin: True,
      field: 'x'
    })

    >>> chart = alt.Chart().transform_bin("x_binned", "x",
    ...                                   bin=alt.Bin(maxbins=10))
    >>> chart.transform[0]
    BinTransform({
      as: 'x_binned',
      bin: BinParams({
        maxbins: 10
      }),
      field: 'x'
    })

    See Also
    --------
    alt.BinTransform : underlying transform object
    """
    if as_ is not Undefined:
        # 'as' is a Python reserved word, so the argument is spelled 'as_';
        # passing both spellings at once is ambiguous and rejected.
        if 'as' in kwargs:
            raise ValueError("transform_bin: both 'as_' and 'as' passed as arguments.")
        kwargs['as'] = as_
    kwargs['bin'] = bin
    kwargs['field'] = field
    return self._add_transform(core.BinTransform(**kwargs))
def transform_calculate(self, as_=Undefined, calculate=Undefined, **kwargs):
    """
    Add a CalculateTransform to the schema.

    Parameters
    ----------
    as_ : string
        The field for storing the computed formula value.
    calculate : string or alt.expr expression
        A `expression <https://vega.github.io/vega-lite/docs/types.html#expression>`__
        string. Use the variable ``datum`` to refer to the current data object.
    **kwargs
        transforms can also be passed by keyword argument; see Examples

    Returns
    -------
    self : Chart object
        returns chart to allow for chaining

    Examples
    --------
    >>> import altair as alt
    >>> from altair import datum, expr

    >>> chart = alt.Chart().transform_calculate(y = 2 * expr.sin(datum.x))
    >>> chart.transform[0]
    CalculateTransform({
      as: 'y',
      calculate: (2 * sin(datum.x))
    })

    It's also possible to pass the ``CalculateTransform`` arguments directly:

    >>> kwds = {'as': 'y', 'calculate': '2 * sin(datum.x)'}
    >>> chart = alt.Chart().transform_calculate(**kwds)
    >>> chart.transform[0]
    CalculateTransform({
      as: 'y',
      calculate: '2 * sin(datum.x)'
    })

    As the first form is easier to write and understand, that is the
    recommended method.

    See Also
    --------
    alt.CalculateTransform : underlying transform object
    """
    if as_ is Undefined:
        # allow the reserved-word spelling to arrive via **kwargs: {'as': ...}
        as_ = kwargs.pop('as', Undefined)
    elif 'as' in kwargs:
        raise ValueError("transform_calculate: both 'as_' and 'as' passed as arguments.")
    if as_ is not Undefined or calculate is not Undefined:
        dct = {'as': as_, 'calculate': calculate}
        self = self._add_transform(core.CalculateTransform(**dct))
    # each remaining keyword defines one additional calculate transform
    for as_, calculate in kwargs.items():
        dct = {'as': as_, 'calculate': calculate}
        self = self._add_transform(core.CalculateTransform(**dct))
    return self
def transform_density(self, density, as_=Undefined, bandwidth=Undefined, counts=Undefined,
cumulative=Undefined, extent=Undefined, groupby=Undefined,
maxsteps=Undefined, minsteps=Undefined, steps=Undefined):
"""Add a DensityTransform to the spec.
Attributes
----------
density : str
The data field for which to perform density estimation.
as_ : [str, str]
The output fields for the sample value and corresponding density estimate.
**Default value:** ``["value", "density"]``
bandwidth : float
The bandwidth (standard deviation) of the Gaussian kernel. If unspecified or set to
zero, the bandwidth value is automatically estimated from the input data using
Scott’s rule.
counts : boolean
A boolean flag indicating if the output values should be probability estimates
(false) or smoothed counts (true).
**Default value:** ``false``
cumulative : boolean
A boolean flag indicating whether to produce density estimates (false) or cumulative
density estimates (true).
**Default value:** ``false``
extent : List([float, float])
A [min, max] domain from which to sample the distribution. If unspecified, the
extent will be determined by the observed minimum and maximum values of the density
value field.
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
maxsteps : float
The maximum number of samples to take along the extent domain for plotting the
density. **Default value:** ``200``
minsteps : float
The minimum number of samples to take along the extent domain for plotting the
density. **Default value:** ``25``
steps : float
The exact number of samples to take along the extent domain for plotting the
density. If specified, overrides both minsteps and maxsteps to set an exact number
of uniform samples. Potentially useful in conjunction with a fixed extent to ensure
consistent sample points for stacked densities.
"""
return self._add_transform(core.DensityTransform(
density=density, bandwidth=bandwidth, counts=counts, cumulative=cumulative,
extent=extent, groupby=groupby, maxsteps=maxsteps, minsteps=minsteps, steps=steps,
**{'as': as_}
))
def transform_impute(self, impute, key, frame=Undefined, groupby=Undefined,
keyvals=Undefined, method=Undefined, value=Undefined):
"""
Add an ImputeTransform to the schema.
Parameters
----------
impute : string
The data field for which the missing values should be imputed.
key : string
A key field that uniquely identifies data objects within a group.
Missing key values (those occurring in the data but not in the current group) will
be imputed.
frame : List(anyOf(None, float))
A frame specification as a two-element array used to control the window over which
the specified method is applied. The array entries should either be a number
indicating the offset from the current data object, or null to indicate unbounded
rows preceding or following the current data object. For example, the value ``[-5,
5]`` indicates that the window should include five objects preceding and five
objects following the current object.
**Default value:** : ``[null, null]`` indicating that the window includes all
objects.
groupby : List(string)
An optional array of fields by which to group the values.
Imputation will then be performed on a per-group basis.
keyvals : anyOf(List(Mapping(required=[])), :class:`ImputeSequence`)
Defines the key values that should be considered for imputation.
An array of key values or an object defining a `number sequence
<https://vega.github.io/vega-lite/docs/impute.html#sequence-def>`__.
If provided, this will be used in addition to the key values observed within the
input data. If not provided, the values will be derived from all unique values of
the ``key`` field. For ``impute`` in ``encoding``, the key field is the x-field if
the y-field is imputed, or vice versa.
If there is no impute grouping, this property *must* be specified.
method : :class:`ImputeMethod`
The imputation method to use for the field value of imputed data objects.
One of ``value``, ``mean``, ``median``, ``max`` or ``min``.
**Default value:** ``"value"``
value : Mapping(required=[])
The field value to use when the imputation ``method`` is ``"value"``.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.ImputeTransform : underlying transform object
"""
return self._add_transform(core.ImputeTransform(
impute=impute, key=key, frame=frame, groupby=groupby,
keyvals=keyvals, method=method, value=value
))
    def transform_joinaggregate(self, joinaggregate=Undefined, groupby=Undefined, **kwargs):
        """
        Add a JoinAggregateTransform to the schema.

        Parameters
        ----------
        joinaggregate : List(:class:`JoinAggregateFieldDef`)
            The definition of the fields in the join aggregate, and what calculations to use.
        groupby : List(string)
            The data fields for partitioning the data objects into separate groups. If
            unspecified, all data points will be in a single group.
        **kwargs
            joinaggregates can also be passed by keyword argument; see Examples.

        Returns
        -------
        self : Chart object
            returns chart to allow for chaining

        Examples
        --------
        >>> import altair as alt
        >>> chart = alt.Chart().transform_joinaggregate(x='sum(y)')
        >>> chart.transform[0]
        JoinAggregateTransform({
          joinaggregate: [JoinAggregateFieldDef({
            as: 'x',
            field: 'y',
            op: 'sum'
          })]
        })

        See Also
        --------
        alt.JoinAggregateTransform : underlying transform object
        """
        if joinaggregate is Undefined:
            joinaggregate = []
        # Each keyword argument is a shorthand such as x='sum(y)'; the key is
        # the output name and the value encodes the field and aggregate op.
        for key, val in kwargs.items():
            parsed = utils.parse_shorthand(val)
            dct = {'as': key,
                   'field': parsed.get('field', Undefined),
                   'op': parsed.get('aggregate', Undefined)}
            joinaggregate.append(core.JoinAggregateFieldDef(**dct))
        return self._add_transform(core.JoinAggregateTransform(
            joinaggregate=joinaggregate, groupby=groupby
        ))
def transform_filter(self, filter, **kwargs):
"""
Add a FilterTransform to the schema.
Parameters
----------
filter : a filter expression or :class:`LogicalOperandPredicate`
The `filter` property must be one of the predicate definitions:
(1) a string or alt.expr expression
(2) a range predicate
(3) a selection predicate
(4) a logical operand combining (1)-(3)
(5) a Selection object
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.FilterTransform : underlying transform object
"""
if isinstance(filter, Selection):
filter = {'selection': filter.name}
elif isinstance(filter, core.SelectionOperand):
filter = {'selection': filter}
return self._add_transform(core.FilterTransform(filter=filter, **kwargs))
def transform_flatten(self, flatten, as_=Undefined):
"""Add a FlattenTransform to the schema.
Parameters
----------
flatten : List(string)
An array of one or more data fields containing arrays to flatten.
If multiple fields are specified, their array values should have a parallel
structure, ideally with the same length.
If the lengths of parallel arrays do not match,
the longest array will be used with ``null`` values added for missing entries.
as : List(string)
The output field names for extracted array values.
**Default value:** The field name of the corresponding array field
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.FlattenTransform : underlying transform object
"""
return self._add_transform(core.FlattenTransform(flatten=flatten, **{'as': as_}))
def transform_fold(self, fold, as_=Undefined):
"""Add a FoldTransform to the spec.
Parameters
----------
fold : List(string)
An array of data fields indicating the properties to fold.
as : [string, string]
The output field names for the key and value properties produced by the fold
transform. Default: ``["key", "value"]``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
Chart.transform_pivot : pivot transform - opposite of fold.
alt.FoldTransform : underlying transform object
"""
return self._add_transform(core.FoldTransform(fold=fold, **{'as': as_}))
def transform_loess(self, on, loess, as_=Undefined, bandwidth=Undefined, groupby=Undefined):
"""Add a LoessTransform to the spec.
Parameters
----------
on : str
The data field of the independent variable to use a predictor.
loess : str
The data field of the dependent variable to smooth.
as_ : [str, str]
The output field names for the smoothed points generated by the loess transform.
**Default value:** The field names of the input x and y values.
bandwidth : float
A bandwidth parameter in the range ``[0, 1]`` that determines the amount of
smoothing. **Default value:** ``0.3``
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
Chart.transform_regression: regression transform
alt.LoessTransform : underlying transform object
"""
return self._add_transform(core.LoessTransform(
loess=loess, on=on, bandwidth=bandwidth, groupby=groupby, **{'as': as_}
))
def transform_lookup(self, lookup=Undefined, from_=Undefined, as_=Undefined, default=Undefined, **kwargs):
"""Add a DataLookupTransform or SelectionLookupTransform to the chart
Attributes
----------
lookup : string
Key in primary data source.
from_ : anyOf(:class:`LookupData`, :class:`LookupSelection`)
Secondary data reference.
as_ : anyOf(string, List(string))
The output fields on which to store the looked up data values.
For data lookups, this property may be left blank if ``from_.fields``
has been specified (those field names will be used); if ``from_.fields``
has not been specified, ``as_`` must be a string.
For selection lookups, this property is optional: if unspecified,
looked up values will be stored under a property named for the selection;
and if specified, it must correspond to ``from_.fields``.
default : string
The default value to use if lookup fails. **Default value:** ``null``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.DataLookupTransform : underlying transform object
alt.SelectionLookupTransform : underlying transform object
"""
if as_ is not Undefined:
if 'as' in kwargs:
raise ValueError("transform_lookup: both 'as_' and 'as' passed as arguments.")
kwargs['as'] = as_
if from_ is not Undefined:
if 'from' in kwargs:
raise ValueError("transform_lookup: both 'from_' and 'from' passed as arguments.")
kwargs['from'] = from_
kwargs['lookup'] = lookup
kwargs['default'] = default
return self._add_transform(core.LookupTransform(**kwargs))
def transform_pivot(self, pivot, value, groupby=Undefined, limit=Undefined, op=Undefined):
"""Add a pivot transform to the chart.
Parameters
----------
pivot : str
The data field to pivot on. The unique values of this field become new field names
in the output stream.
value : str
The data field to populate pivoted fields. The aggregate values of this field become
the values of the new pivoted fields.
groupby : List(str)
The optional data fields to group by. If not specified, a single group containing
all data objects will be used.
limit : float
An optional parameter indicating the maximum number of pivoted fields to generate.
The default ( ``0`` ) applies no limit. The pivoted ``pivot`` names are sorted in
ascending order prior to enforcing the limit.
**Default value:** ``0``
op : string
The aggregation operation to apply to grouped ``value`` field values.
**Default value:** ``sum``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
Chart.transform_fold : fold transform - opposite of pivot.
alt.PivotTransform : underlying transform object
"""
return self._add_transform(core.PivotTransform(
pivot=pivot, value=value, groupby=groupby, limit=limit, op=op
))
def transform_quantile(self, quantile, as_=Undefined, groupby=Undefined,
probs=Undefined, step=Undefined):
"""Add a quantile transform to the chart
Parameters
----------
quantile : str
The data field for which to perform quantile estimation.
as : [str, str]
The output field names for the probability and quantile values.
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
probs : List(float)
An array of probabilities in the range (0, 1) for which to compute quantile values.
If not specified, the *step* parameter will be used.
step : float
A probability step size (default 0.01) for sampling quantile values. All values from
one-half the step size up to 1 (exclusive) will be sampled. This parameter is only
used if the *probs* parameter is not provided. **Default value:** ``["prob", "value"]``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.QuantileTransform : underlying transform object
"""
return self._add_transform(core.QuantileTransform(
quantile=quantile, groupby=groupby, probs=probs, step=step, **{'as': as_}
))
def transform_regression(self, on, regression, as_=Undefined, extent=Undefined, groupby=Undefined,
method=Undefined, order=Undefined, params=Undefined):
"""Add a RegressionTransform to the chart.
Parameters
----------
on : str
The data field of the independent variable to use a predictor.
regression : str
The data field of the dependent variable to predict.
as_ : [str, str]
The output field names for the smoothed points generated by the regression
transform. **Default value:** The field names of the input x and y values.
extent : [float, float]
A [min, max] domain over the independent (x) field for the starting and ending
points of the generated trend line.
groupby : List(str)
The data fields to group by. If not specified, a single group containing all data
objects will be used.
method : enum('linear', 'log', 'exp', 'pow', 'quad', 'poly')
The functional form of the regression model. One of ``"linear"``, ``"log"``,
``"exp"``, ``"pow"``, ``"quad"``, or ``"poly"``. **Default value:** ``"linear"``
order : float
The polynomial order (number of coefficients) for the 'poly' method.
**Default value:** ``3``
params : boolean
A boolean flag indicating if the transform should return the regression model
parameters (one object per group), rather than trend line points.
The resulting objects include a ``coef`` array of fitted coefficient values
(starting with the intercept term and then including terms of increasing order)
and an ``rSquared`` value (indicating the total variance explained by the model).
**Default value:** ``false``
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
Chart.transform_loess : LOESS transform
alt.RegressionTransform : underlying transform object
"""
return self._add_transform(core.RegressionTransform(
regression=regression, on=on, extent=extent, groupby=groupby,
method=method, order=order, params=params, **{'as': as_}
))
def transform_sample(self, sample=1000):
"""
Add a SampleTransform to the schema.
Parameters
----------
sample : float
The maximum number of data objects to include in the sample. Default: 1000.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.SampleTransform : underlying transform object
"""
return self._add_transform(core.SampleTransform(sample))
def transform_stack(self, as_, stack, groupby, offset=Undefined, sort=Undefined):
"""
Add a StackTransform to the schema.
Parameters
----------
as_ : anyOf(string, List(string))
Output field names. This can be either a string or an array of strings with
two elements denoting the name for the fields for stack start and stack end
respectively.
If a single string(eg."val") is provided, the end field will be "val_end".
stack : string
The field which is stacked.
groupby : List(string)
The data fields to group by.
offset : enum('zero', 'center', 'normalize')
Mode for stacking marks. Default: 'zero'.
sort : List(:class:`SortField`)
Field that determines the order of leaves in the stacked charts.
Returns
-------
self : Chart object
returns chart to allow for chaining
See Also
--------
alt.StackTransform : underlying transform object
"""
return self._add_transform(core.StackTransform(
stack=stack, groupby=groupby, offset=offset, sort=sort, **{'as': as_}
))
    def transform_timeunit(self, as_=Undefined, field=Undefined, timeUnit=Undefined, **kwargs):
        """
        Add a TimeUnitTransform to the schema.

        Parameters
        ----------
        as_ : string
            The output field to write the timeUnit value.
        field : string
            The data field to apply time unit.
        timeUnit : :class:`TimeUnit`
            The timeUnit.
        **kwargs
            transforms can also be passed by keyword argument; see Examples

        Returns
        -------
        self : Chart object
            returns chart to allow for chaining

        Examples
        --------
        >>> import altair as alt
        >>> from altair import datum, expr
        >>> chart = alt.Chart().transform_timeunit(month='month(date)')
        >>> chart.transform[0]
        TimeUnitTransform({
          as: 'month',
          field: 'date',
          timeUnit: 'month'
        })

        It's also possible to pass the ``TimeUnitTransform`` arguments directly;
        this is most useful in cases where the desired field name is not a
        valid python identifier:

        >>> kwds = {'as': 'month', 'timeUnit': 'month', 'field': 'The Month'}
        >>> chart = alt.Chart().transform_timeunit(**kwds)
        >>> chart.transform[0]
        TimeUnitTransform({
          as: 'month',
          field: 'The Month',
          timeUnit: 'month'
        })

        As the first form is easier to write and understand, that is the
        recommended method.

        See Also
        --------
        alt.TimeUnitTransform : underlying transform object
        """
        # 'as' is a reserved word in Python, so the output field may arrive
        # either via the as_ parameter or as an 'as' key in **kwargs; accept
        # exactly one of the two spellings.
        if as_ is Undefined:
            as_ = kwargs.pop('as', Undefined)
        else:
            if 'as' in kwargs:
                raise ValueError("transform_timeunit: both 'as_' and 'as' passed as arguments.")
        # Explicit as_/timeUnit/field arguments produce one transform.
        if as_ is not Undefined:
            dct = {'as': as_, 'timeUnit': timeUnit, 'field': field}
            self = self._add_transform(core.TimeUnitTransform(**dct))
        # Each remaining keyword argument is a shorthand like month='month(date)';
        # the key names the output field and the value encodes timeUnit and field.
        for as_, shorthand in kwargs.items():
            dct = utils.parse_shorthand(shorthand,
                                        parse_timeunits=True,
                                        parse_aggregates=False,
                                        parse_types=False)
            dct.pop('type', None)  # the transform schema has no 'type' property
            dct['as'] = as_
            if 'timeUnit' not in dct:
                raise ValueError("'{}' must include a valid timeUnit".format(shorthand))
            self = self._add_transform(core.TimeUnitTransform(**dct))
        return self
    def transform_window(self, window=Undefined, frame=Undefined, groupby=Undefined,
                         ignorePeers=Undefined, sort=Undefined, **kwargs):
        """Add a WindowTransform to the schema

        Parameters
        ----------
        window : List(:class:`WindowFieldDef`)
            The definition of the fields in the window, and what calculations to use.
        frame : List(anyOf(None, float))
            A frame specification as a two-element array indicating how the sliding window
            should proceed. The array entries should either be a number indicating the offset
            from the current data object, or null to indicate unbounded rows preceding or
            following the current data object. The default value is ``[null, 0]``, indicating
            that the sliding window includes the current object and all preceding objects. The
            value ``[-5, 5]`` indicates that the window should include five objects preceding
            and five objects following the current object. Finally, ``[null, null]`` indicates
            that the window frame should always include all data objects. The only operators
            affected are the aggregation operations and the ``first_value``, ``last_value``, and
            ``nth_value`` window operations. The other window operations are not affected by
            this.
            **Default value:** : ``[null, 0]`` (includes the current object and all preceding
            objects)
        groupby : List(string)
            The data fields for partitioning the data objects into separate windows. If
            unspecified, all data points will be in a single group.
        ignorePeers : boolean
            Indicates if the sliding window frame should ignore peer values. (Peer values are
            those considered identical by the sort criteria). The default is false, causing the
            window frame to expand to include all peer values. If set to true, the window frame
            will be defined by offset values only. This setting only affects those operations
            that depend on the window frame, namely aggregation operations and the first_value,
            last_value, and nth_value window operations.
            **Default value:** ``false``
        sort : List(:class:`SortField`)
            A sort field definition for sorting data objects within a window. If two data
            objects are considered equal by the comparator, they are considered “peer” values of
            equal rank. If sort is not specified, the order is undefined: data objects are
            processed in the order they are observed and none are considered peers (the
            ignorePeers parameter is ignored and treated as if set to ``true`` ).
        **kwargs
            transforms can also be passed by keyword argument; see Examples

        Examples
        --------
        A cumulative line chart

        >>> import altair as alt
        >>> import numpy as np
        >>> import pandas as pd
        >>> data = pd.DataFrame({'x': np.arange(100),
        ...                      'y': np.random.randn(100)})
        >>> chart = alt.Chart(data).mark_line().encode(
        ...     x='x:Q',
        ...     y='ycuml:Q'
        ... ).transform_window(
        ...     ycuml='sum(y)'
        ... )
        >>> chart.transform[0]
        WindowTransform({
          window: [WindowFieldDef({
            as: 'ycuml',
            field: 'y',
            op: 'sum'
          })]
        })
        """
        # Keyword arguments are shorthands such as ycuml='sum(y)'; each parses
        # into a WindowFieldDef appended to any explicitly-provided window list.
        if kwargs:
            if window is Undefined:
                window = []
            for as_, shorthand in kwargs.items():
                kwds = {'as': as_}
                kwds.update(utils.parse_shorthand(shorthand,
                                                  parse_aggregates=False,
                                                  parse_window_ops=True,
                                                  parse_timeunits=False,
                                                  parse_types=False))
                window.append(core.WindowFieldDef(**kwds))
        return self._add_transform(core.WindowTransform(window=window, frame=frame, groupby=groupby,
                                                        ignorePeers=ignorePeers, sort=sort))
# Display-related methods
def _repr_mimebundle_(self, include, exclude):
"""Return a MIME bundle for display in Jupyter frontends."""
# Catch errors explicitly to get around issues in Jupyter frontend
# see https://github.com/ipython/ipython/issues/11038
try:
dct = self.to_dict()
except Exception:
utils.display_traceback(in_ipython=True)
return {}
else:
return renderers.get()(dct)
def display(self, renderer=Undefined, theme=Undefined, actions=Undefined,
**kwargs):
"""Display chart in Jupyter notebook or JupyterLab
Parameters are passed as options to vega-embed within supported frontends.
See https://github.com/vega/vega-embed#options for details.
Parameters
----------
renderer : string ('canvas' or 'svg')
The renderer to use
theme : string
The Vega theme name to use; see https://github.com/vega/vega-themes
actions : bool or dict
Specify whether action links ("Open In Vega Editor", etc.) are
included in the view.
**kwargs :
Additional parameters are also passed to vega-embed as options.
"""
from IPython.display import display
if renderer is not Undefined:
kwargs['renderer'] = renderer
if theme is not Undefined:
kwargs['theme'] = theme
if actions is not Undefined:
kwargs['actions'] = actions
if kwargs:
options = renderers.options.copy()
options['embed_options']= options.get('embed_options', {}).copy()
options['embed_options'].update(kwargs)
with renderers.enable(**options):
display(self)
else:
display(self)
def serve(self, ip='127.0.0.1', port=8888, n_retries=50, files=None,
jupyter_warning=True, open_browser=True, http_server=None,
**kwargs):
"""Open a browser window and display a rendering of the chart
Parameters
----------
html : string
HTML to serve
ip : string (default = '127.0.0.1')
ip address at which the HTML will be served.
port : int (default = 8888)
the port at which to serve the HTML
n_retries : int (default = 50)
the number of nearby ports to search if the specified port
is already in use.
files : dictionary (optional)
dictionary of extra content to serve
jupyter_warning : bool (optional)
if True (default), then print a warning if this is used
within the Jupyter notebook
open_browser : bool (optional)
if True (default), then open a web browser to the given HTML
http_server : class (optional)
optionally specify an HTTPServer class to use for showing the
figure. The default is Python's basic HTTPServer.
**kwargs :
additional keyword arguments passed to the save() method
"""
from ...utils.server import serve
html = io.StringIO()
self.save(html, format='html', **kwargs)
html.seek(0)
serve(html.read(), ip=ip, port=port, n_retries=n_retries,
files=files, jupyter_warning=jupyter_warning,
open_browser=open_browser, http_server=http_server)
@utils.use_signature(core.Resolve)
def _set_resolve(self, **kwargs):
"""Copy the chart and update the resolve property with kwargs"""
if not hasattr(self, 'resolve'):
raise ValueError("{} object has no attribute "
"'resolve'".format(self.__class__))
copy = self.copy(deep=['resolve'])
if copy.resolve is Undefined:
copy.resolve = core.Resolve()
for key, val in kwargs.items():
copy.resolve[key] = val
return copy
    @utils.use_signature(core.AxisResolveMap)
    def resolve_axis(self, *args, **kwargs):
        """Return a copy of the chart with axis resolution set via an AxisResolveMap."""
        return self._set_resolve(axis=core.AxisResolveMap(*args, **kwargs))
    @utils.use_signature(core.LegendResolveMap)
    def resolve_legend(self, *args, **kwargs):
        """Return a copy of the chart with legend resolution set via a LegendResolveMap."""
        return self._set_resolve(legend=core.LegendResolveMap(*args, **kwargs))
    @utils.use_signature(core.ScaleResolveMap)
    def resolve_scale(self, *args, **kwargs):
        """Return a copy of the chart with scale resolution set via a ScaleResolveMap."""
        return self._set_resolve(scale=core.ScaleResolveMap(*args, **kwargs))
class _EncodingMixin(object):
    """Mixin providing the ``encode`` and ``facet`` methods for chart classes."""
    @utils.use_signature(core.FacetedEncoding)
    def encode(self, *args, **kwargs):
        """Return a copy of the chart with new encodings merged into the old ones."""
        # Convert args to kwargs based on their types.
        kwargs = utils.infer_encoding_types(args, kwargs, channels)
        # get a copy of the dict representation of the previous encoding
        copy = self.copy(deep=['encoding'])
        encoding = copy._get('encoding', {})
        if isinstance(encoding, core.VegaLiteSchema):
            # drop Undefined entries so they don't clobber new encodings
            encoding = {k: v for k, v in encoding._kwds.items()
                        if v is not Undefined}
        # update with the new encodings, and apply them to the copy
        encoding.update(kwargs)
        copy.encoding = core.FacetedEncoding(**encoding)
        return copy
    def facet(self, facet=Undefined, row=Undefined, column=Undefined, data=Undefined,
              columns=Undefined, **kwargs):
        """Create a facet chart from the current chart.

        Faceted charts require data to be specified at the top level; if data
        is not specified, the data from the current chart will be used at the
        top level.

        Parameters
        ----------
        facet : string or alt.Facet (optional)
            The data column to use as an encoding for a wrapped facet.
            If specified, then neither row nor column may be specified.
        column : string or alt.Column (optional)
            The data column to use as an encoding for a column facet.
            May be combined with row argument, but not with facet argument.
        row : string or alt.Column (optional)
            The data column to use as an encoding for a row facet.
            May be combined with column argument, but not with facet argument.
        data : string or dataframe (optional)
            The dataset to use for faceting. If not supplied, then data must
            be specified in the top-level chart that calls this method.
        columns : integer
            the maximum number of columns for a wrapped facet.

        Returns
        -------
        self :
            for chaining
        """
        # A wrapped facet and a row/column facet are mutually exclusive.
        facet_specified = (facet is not Undefined)
        rowcol_specified = (row is not Undefined or column is not Undefined)
        if facet_specified and rowcol_specified:
            raise ValueError("facet argument cannot be combined with row/column argument.")
        if data is Undefined:
            if self.data is Undefined:
                raise ValueError("Facet charts require data to be specified at the top level.")
            # Move the data from the inner spec up to the facet chart's top level.
            self = self.copy(deep=False)
            data, self.data = self.data, Undefined
        if facet_specified:
            if isinstance(facet, str):
                facet = channels.Facet(facet)
        else:
            facet = FacetMapping(row=row, column=column)
        return FacetChart(spec=self, facet=facet, data=data, columns=columns, **kwargs)
class Chart(TopLevelMixin, _EncodingMixin, mixins.MarkMethodMixin,
            core.TopLevelUnitSpec):
    """Create a basic Altair/Vega-Lite chart.

    Although it is possible to set all Chart properties as constructor attributes,
    it is more idiomatic to use methods such as ``mark_point()``, ``encode()``,
    ``transform_filter()``, ``properties()``, etc. See Altair's documentation
    for details and examples: http://altair-viz.github.io/.

    Attributes
    ----------
    data : Data
        An object describing the data source
    mark : AnyMark
        A string describing the mark type (one of `"bar"`, `"circle"`, `"square"`, `"tick"`,
        `"line"`, * `"area"`, `"point"`, `"rule"`, `"geoshape"`, and `"text"`) or a
        MarkDef object.
    encoding : FacetedEncoding
        A key-value mapping between encoding channels and definition of fields.
    autosize : anyOf(AutosizeType, AutoSizeParams)
        Sets how the visualization size should be determined. If a string, should be one of
        `"pad"`, `"fit"` or `"none"`. Object values can additionally specify parameters for
        content sizing and automatic resizing. `"fit"` is only supported for single and
        layered views that don't use `rangeStep`. __Default value__: `pad`
    background : string
        CSS color property to use as the background of visualization.
        **Default value:** none (transparent)
    config : Config
        Vega-Lite configuration object. This property can only be defined at the top-level
        of a specification.
    description : string
        Description of this mark for commenting purpose.
    height : float
        The height of a visualization.
    name : string
        Name of the visualization for later reference.
    padding : Padding
        The default visualization padding, in pixels, from the edge of the visualization
        canvas to the data rectangle. If a number, specifies padding for all sides. If an
        object, the value should have the format `{"left": 5, "top": 5, "right": 5,
        "bottom": 5}` to specify padding for each side of the visualization. __Default
        value__: `5`
    projection : Projection
        An object defining properties of geographic projection. Works with `"geoshape"`
        marks and `"point"` or `"line"` marks that have a channel (one or more of `"X"`,
        `"X2"`, `"Y"`, `"Y2"`) with type `"latitude"`, or `"longitude"`.
    selection : Mapping(required=[])
        A key-value mapping between selection names and definitions.
    title : anyOf(string, TitleParams)
        Title for the plot.
    transform : List(Transform)
        An array of data transformations such as filter and new field calculation.
    width : float
        The width of a visualization.
    """
    def __init__(self, data=Undefined, encoding=Undefined, mark=Undefined,
                 width=Undefined, height=Undefined, **kwargs):
        # Pass the common chart properties through to the schema superclass.
        super(Chart, self).__init__(data=data, encoding=encoding, mark=mark,
                                    width=width, height=height, **kwargs)
    @classmethod
    def from_dict(cls, dct, validate=True):
        """Construct class from a dictionary representation

        Parameters
        ----------
        dct : dictionary
            The dict from which to construct the class
        validate : boolean
            If True (default), then validate the input against the schema.

        Returns
        -------
        obj : Chart object
            The wrapped schema

        Raises
        ------
        jsonschema.ValidationError :
            if validate=True and dct does not conform to the schema
        """
        # Try each top-level chart class in turn until one validates; for
        # Chart itself, delegate to the superclass to avoid infinite recursion
        # back into this method.
        for class_ in TopLevelMixin.__subclasses__():
            if class_ is Chart:
                class_ = super(Chart, cls)
            try:
                return class_.from_dict(dct, validate=validate)
            except jsonschema.ValidationError:
                pass
        # As a last resort, try using the Root vegalite object
        return core.Root.from_dict(dct, validate)
    def add_selection(self, *selections):
        """Add one or more selections to the chart."""
        if not selections:
            return self
        copy = self.copy(deep=['selection'])
        if copy.selection is Undefined:
            copy.selection = {}
        # Register each selection under its name on a copy of the chart.
        for s in selections:
            copy.selection[s.name] = s.selection
        return copy
    def interactive(self, name=None, bind_x=True, bind_y=True):
        """Make chart axes scales interactive

        Parameters
        ----------
        name : string
            The selection name to use for the axes scales. This name should be
            unique among all selections within the chart.
        bind_x : boolean, default True
            If true, then bind the interactive scales to the x-axis
        bind_y : boolean, default True
            If true, then bind the interactive scales to the y-axis

        Returns
        -------
        chart :
            copy of self, with interactive axes added
        """
        # Interactivity is implemented as an interval selection bound to the
        # chart's scales on the requested encodings.
        encodings = []
        if bind_x:
            encodings.append('x')
        if bind_y:
            encodings.append('y')
        return self.add_selection(selection_interval(bind='scales',
                                                     encodings=encodings))
def _check_if_valid_subspec(spec, classname):
    """Raise a ValueError if *spec* cannot be used as a sub-spec of *classname*.

    A valid sub-spec is a chart object or dict that defines none of the
    top-level-only keys.
    """
    err = ('Objects with "{0}" attribute cannot be used within {1}. '
           'Consider defining the {0} attribute in the {1} object instead.')
    if not isinstance(spec, (core.SchemaBase, dict)):
        raise ValueError("Only chart objects can be used in {0}.".format(classname))
    is_schema = isinstance(spec, core.SchemaBase)
    for attr in TOPLEVEL_ONLY_KEYS:
        value = getattr(spec, attr, Undefined) if is_schema else spec.get(attr, Undefined)
        if value is not Undefined:
            raise ValueError(err.format(attr, classname))
def _check_if_can_be_layered(spec):
    """Check if the spec can be layered.

    Raises a ValueError if *spec* is faceted, repeated, concatenated,
    or is not a chart object at all.
    """
    def _get(spec, attr):
        # read *attr* uniformly from either a schema object or a plain dict
        if isinstance(spec, core.SchemaBase):
            return spec._get(attr)
        else:
            return spec.get(attr, Undefined)
    encoding = _get(spec, 'encoding')
    if encoding is not Undefined:
        for channel in ['row', 'column', 'facet']:
            if _get(encoding, channel) is not Undefined:
                raise ValueError("Faceted charts cannot be layered.")
    if isinstance(spec, (Chart, LayerChart)):
        return
    if not isinstance(spec, (core.SchemaBase, dict)):
        raise ValueError("Only chart objects can be layered.")
    # fix: the former standalone `facet` check duplicated the FacetChart test
    # below (identical condition and message); it has been removed
    if isinstance(spec, FacetChart) or _get(spec, 'facet') is not Undefined:
        raise ValueError("Faceted charts cannot be layered.")
    if isinstance(spec, RepeatChart) or _get(spec, 'repeat') is not Undefined:
        raise ValueError("Repeat charts cannot be layered.")
    if isinstance(spec, ConcatChart) or _get(spec, 'concat') is not Undefined:
        raise ValueError("Concatenated charts cannot be layered.")
    if isinstance(spec, HConcatChart) or _get(spec, 'hconcat') is not Undefined:
        raise ValueError("Concatenated charts cannot be layered.")
    if isinstance(spec, VConcatChart) or _get(spec, 'vconcat') is not Undefined:
        raise ValueError("Concatenated charts cannot be layered.")
@utils.use_signature(core.TopLevelRepeatSpec)
class RepeatChart(TopLevelMixin, core.TopLevelRepeatSpec):
    """A chart repeated across rows and columns with small changes"""
    def __init__(self, data=Undefined, spec=Undefined, repeat=Undefined, **kwargs):
        # reject sub-specs that carry top-level-only attributes
        _check_if_valid_subspec(spec, 'RepeatChart')
        super(RepeatChart, self).__init__(data=data, spec=spec, repeat=repeat, **kwargs)
    def interactive(self, name=None, bind_x=True, bind_y=True):
        """Make chart axes scales interactive
        Parameters
        ----------
        name : string
            The selection name to use for the axes scales. This name should be
            unique among all selections within the chart.
        bind_x : boolean, default True
            If true, then bind the interactive scales to the x-axis
        bind_y : boolean, default True
            If true, then bind the interactive scales to the y-axis
        Returns
        -------
        chart :
            copy of self, with interactive axes added
        """
        # delegate to the repeated sub-spec; only a shallow copy is needed
        copy = self.copy(deep=False)
        copy.spec = copy.spec.interactive(name=name, bind_x=bind_x, bind_y=bind_y)
        return copy
    def add_selection(self, *selections):
        """Add one or more selections to the chart."""
        # no-op when there is nothing to add or no sub-spec to attach to
        if not selections or self.spec is Undefined:
            return self
        copy = self.copy()
        copy.spec = copy.spec.add_selection(*selections)
        return copy
def repeat(repeater='repeat'):
    """Tie a channel to the row or column within a repeated chart

    The output of this should be passed to the ``field`` attribute of
    a channel.

    Parameters
    ----------
    repeater : {'row'|'column'|'repeat'}
        The repeater to tie the field to. Default is 'repeat'.

    Returns
    -------
    repeat : RepeatRef object
    """
    valid_repeaters = ('row', 'column', 'repeat')
    if repeater not in valid_repeaters:
        raise ValueError("repeater must be one of ['row', 'column', 'repeat']")
    return core.RepeatRef(repeat=repeater)
@utils.use_signature(core.TopLevelConcatSpec)
class ConcatChart(TopLevelMixin, core.TopLevelConcatSpec):
    """A chart with horizontally-concatenated facets"""
    def __init__(self, data=Undefined, concat=(), columns=Undefined, **kwargs):
        # TODO: move common data to top level?
        # validate each sub-chart before handing the list to the superclass
        for spec in concat:
            _check_if_valid_subspec(spec, 'ConcatChart')
        super(ConcatChart, self).__init__(data=data, concat=list(concat),
                                          columns=columns, **kwargs)
        # if every subchart shares the same dataset, hoist it to this level
        self.data, self.concat = _combine_subchart_data(self.data, self.concat)
    def __ior__(self, other):
        # ``chart |= other``: append *other* as a new subchart in-place
        _check_if_valid_subspec(other, 'ConcatChart')
        self.concat.append(other)
        self.data, self.concat = _combine_subchart_data(self.data, self.concat)
        return self
    def __or__(self, other):
        # ``chart | other``: non-mutating variant of __ior__
        copy = self.copy(deep=['concat'])
        copy |= other
        return copy
    def add_selection(self, *selections):
        """Add one or more selections to all subcharts."""
        if not selections or not self.concat:
            return self
        copy = self.copy()
        copy.concat = [chart.add_selection(*selections)
                       for chart in copy.concat]
        return copy
def concat(*charts, **kwargs):
    """Concatenate charts horizontally"""
    # thin wrapper; extra kwargs (e.g. columns) pass through to ConcatChart
    return ConcatChart(concat=charts, **kwargs)
@utils.use_signature(core.TopLevelHConcatSpec)
class HConcatChart(TopLevelMixin, core.TopLevelHConcatSpec):
    """A chart with horizontally-concatenated facets"""
    def __init__(self, data=Undefined, hconcat=(), **kwargs):
        # TODO: move common data to top level?
        # validate each sub-chart before handing the list to the superclass
        for spec in hconcat:
            _check_if_valid_subspec(spec, 'HConcatChart')
        super(HConcatChart, self).__init__(data=data, hconcat=list(hconcat), **kwargs)
        # if every subchart shares the same dataset, hoist it to this level
        self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)
    def __ior__(self, other):
        # ``chart |= other``: append *other* as a new column in-place
        _check_if_valid_subspec(other, 'HConcatChart')
        self.hconcat.append(other)
        self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)
        return self
    def __or__(self, other):
        # ``chart | other``: non-mutating variant of __ior__
        copy = self.copy(deep=['hconcat'])
        copy |= other
        return copy
    def add_selection(self, *selections):
        """Add one or more selections to all subcharts."""
        if not selections or not self.hconcat:
            return self
        copy = self.copy()
        copy.hconcat = [chart.add_selection(*selections)
                        for chart in copy.hconcat]
        return copy
def hconcat(*charts, **kwargs):
    """Concatenate charts horizontally"""
    # thin wrapper around HConcatChart (side-by-side layout)
    return HConcatChart(hconcat=charts, **kwargs)
@utils.use_signature(core.TopLevelVConcatSpec)
class VConcatChart(TopLevelMixin, core.TopLevelVConcatSpec):
    """A chart with vertically-concatenated facets"""
    def __init__(self, data=Undefined, vconcat=(), **kwargs):
        # TODO: move common data to top level?
        # validate each sub-chart before handing the list to the superclass
        for spec in vconcat:
            _check_if_valid_subspec(spec, 'VConcatChart')
        super(VConcatChart, self).__init__(data=data, vconcat=list(vconcat), **kwargs)
        # if every subchart shares the same dataset, hoist it to this level
        self.data, self.vconcat = _combine_subchart_data(self.data, self.vconcat)
    def __iand__(self, other):
        # ``chart &= other``: append *other* as a new row in-place
        _check_if_valid_subspec(other, 'VConcatChart')
        self.vconcat.append(other)
        self.data, self.vconcat = _combine_subchart_data(self.data, self.vconcat)
        return self
    def __and__(self, other):
        # ``chart & other``: non-mutating variant of __iand__
        copy = self.copy(deep=['vconcat'])
        copy &= other
        return copy
    def add_selection(self, *selections):
        """Add one or more selections to all subcharts."""
        if not selections or not self.vconcat:
            return self
        copy = self.copy()
        copy.vconcat = [chart.add_selection(*selections)
                        for chart in copy.vconcat]
        return copy
def vconcat(*charts, **kwargs):
    """Concatenate charts vertically"""
    # thin wrapper around VConcatChart (stacked layout)
    return VConcatChart(vconcat=charts, **kwargs)
@utils.use_signature(core.TopLevelLayerSpec)
class LayerChart(TopLevelMixin, _EncodingMixin, core.TopLevelLayerSpec):
    """A Chart with layers within a single panel"""
    def __init__(self, data=Undefined, layer=(), **kwargs):
        # TODO: move common data to top level?
        # TODO: check for conflicting interaction
        # each layer must be a valid sub-spec AND layerable (not faceted etc.)
        for spec in layer:
            _check_if_valid_subspec(spec, 'LayerChart')
            _check_if_can_be_layered(spec)
        super(LayerChart, self).__init__(data=data, layer=list(layer), **kwargs)
        # if every layer shares the same dataset, hoist it to this level
        self.data, self.layer = _combine_subchart_data(self.data, self.layer)
    def __iadd__(self, other):
        # ``chart += other``: append *other* as a new layer in-place
        _check_if_valid_subspec(other, 'LayerChart')
        _check_if_can_be_layered(other)
        self.layer.append(other)
        self.data, self.layer = _combine_subchart_data(self.data, self.layer)
        return self
    def __add__(self, other):
        # ``chart + other``: non-mutating variant of __iadd__
        copy = self.copy(deep=['layer'])
        copy += other
        return copy
    def add_layers(self, *layers):
        # append multiple layers at once, returning a new chart
        copy = self.copy(deep=['layer'])
        for layer in layers:
            copy += layer
        return copy
    def interactive(self, name=None, bind_x=True, bind_y=True):
        """Make chart axes scales interactive
        Parameters
        ----------
        name : string
            The selection name to use for the axes scales. This name should be
            unique among all selections within the chart.
        bind_x : boolean, default True
            If true, then bind the interactive scales to the x-axis
        bind_y : boolean, default True
            If true, then bind the interactive scales to the y-axis
        Returns
        -------
        chart :
            copy of self, with interactive axes added
        """
        if not self.layer:
            raise ValueError("LayerChart: cannot call interactive() until a "
                             "layer is defined")
        # only the first layer is made interactive here; presumably the
        # compiled chart shares its scales with the rest -- TODO confirm
        copy = self.copy(deep=['layer'])
        copy.layer[0] = copy.layer[0].interactive(name=name, bind_x=bind_x, bind_y=bind_y)
        return copy
    def add_selection(self, *selections):
        """Add one or more selections to all subcharts."""
        if not selections or not self.layer:
            return self
        # NOTE(review): selections are attached to the first layer only,
        # despite the docstring saying "all subcharts" -- confirm intent
        copy = self.copy()
        copy.layer[0] = copy.layer[0].add_selection(*selections)
        return copy
def layer(*charts, **kwargs):
    """layer multiple charts"""
    # thin wrapper around LayerChart (overlaid panels)
    return LayerChart(layer=charts, **kwargs)
@utils.use_signature(core.TopLevelFacetSpec)
class FacetChart(TopLevelMixin, core.TopLevelFacetSpec):
    """A Chart with layers within a single panel"""
    def __init__(self, data=Undefined, spec=Undefined, facet=Undefined, **kwargs):
        # reject sub-specs that carry top-level-only attributes
        _check_if_valid_subspec(spec, 'FacetChart')
        super(FacetChart, self).__init__(data=data, spec=spec, facet=facet, **kwargs)
    def interactive(self, name=None, bind_x=True, bind_y=True):
        """Make chart axes scales interactive
        Parameters
        ----------
        name : string
            The selection name to use for the axes scales. This name should be
            unique among all selections within the chart.
        bind_x : boolean, default True
            If true, then bind the interactive scales to the x-axis
        bind_y : boolean, default True
            If true, then bind the interactive scales to the y-axis
        Returns
        -------
        chart :
            copy of self, with interactive axes added
        """
        # delegate to the faceted sub-spec; only a shallow copy is needed
        copy = self.copy(deep=False)
        copy.spec = copy.spec.interactive(name=name, bind_x=bind_x, bind_y=bind_y)
        return copy
    def add_selection(self, *selections):
        """Add one or more selections to the chart."""
        # no-op when there is nothing to add or no sub-spec to attach to
        if not selections or self.spec is Undefined:
            return self
        copy = self.copy()
        copy.spec = copy.spec.add_selection(*selections)
        return copy
def topo_feature(url, feature, **kwargs):
    """A convenience function for extracting features from a topojson url
    Parameters
    ----------
    url : string
        An URL from which to load the data set.
    feature : string
        The name of the TopoJSON object set to convert to a GeoJSON feature collection. For
        example, in a map of the world, there may be an object set named `"countries"`.
        Using the feature property, we can extract this set and generate a GeoJSON feature
        object for each country.
    **kwargs :
        additional keywords passed to TopoDataFormat
    Returns
    -------
    data : UrlData
        a UrlData object whose format extracts the named TopoJSON feature set
    """
    return core.UrlData(url=url, format=core.TopoDataFormat(type='topojson',
                                                            feature=feature, **kwargs))
def _combine_subchart_data(data, subcharts):
    """Hoist a dataset shared by every subchart up to the parent level.

    Returns the (possibly updated) parent ``data`` and the list of subcharts
    with now-redundant data attributes cleared.  Datasets are compared by
    object identity (``is``), not by equality.
    """
    def remove_data(subchart):
        # return a copy of *subchart* with its data attribute cleared
        if subchart.data is not Undefined:
            subchart = subchart.copy()
            subchart.data = Undefined
        return subchart
    if not subcharts:
        # No subcharts = nothing to do.
        pass
    elif data is Undefined:
        # Top level has no data; all subchart data must
        # be identical to proceed.
        subdata = subcharts[0].data
        if subdata is not Undefined and all(c.data is subdata for c in subcharts):
            data = subdata
            subcharts = [remove_data(c) for c in subcharts]
    else:
        # Top level has data; subchart data must be either
        # undefined or identical to proceed.
        if all(c.data is Undefined or c.data is data for c in subcharts):
            subcharts = [remove_data(c) for c in subcharts]
    return data, subcharts
@utils.use_signature(core.SequenceParams)
def sequence(start, stop=None, step=Undefined, as_=Undefined, **kwds):
    """Sequence generator."""
    if stop is None:
        # single-argument form: sequence(stop) counts from zero
        stop = start
        start = 0
    # 'as' is a Python keyword, so it is passed via a dict expansion
    params = core.SequenceParams(start=start, stop=stop, step=step, **{'as': as_})
    return core.SequenceGenerator(sequence=params, **kwds)
@utils.use_signature(core.GraticuleParams)
def graticule(**kwds):
    """Graticule generator."""
    # graticule: True indicates default parameters
    value = core.GraticuleParams(**kwds) if kwds else True
    return core.GraticuleGenerator(graticule=value)
def sphere():
    """Sphere generator."""
    # sphere=True requests the default sphere spec (it takes no parameters)
    return core.SphereGenerator(sphere=True)
| [
"jakevdp@google.com"
] | jakevdp@google.com |
346a1d1871be3ea9c34e4439423a76c4f242e810 | 669e9241b02bdaa303fbc2fd4023b90d4d179a59 | /Randomized Pulled String/challenge3.py | 2d68218a4951ecd60943d4a45d32bde6066f8181 | [] | no_license | benjaminpotter/HatchProjects | 0854cf46ae7c3781468116a5d63b703dd54ae68c | 7f6a948d3474c755d071751b725c059e6c7f3553 | refs/heads/master | 2022-01-28T16:58:03.449073 | 2019-08-16T13:47:30 | 2019-08-16T13:47:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | def mouseClicked():
background(0)
strokeWeight(10)
for i in range (0, width) :
r = random(0, 255)
x = random (0, width)
stroke(r, r, r, 100)
line(i, 0, x, height)
| [
"noreply@github.com"
] | noreply@github.com |
29d1ef17c3c791571dbab5483287b705b1f6dd7a | 475509f771a82603a20d6c7487d5a6c1eca64918 | /venv/bin/normalizer | e84f16f1c3d75e10d6e92bf9baf5fd3fe7e6dfa6 | [] | no_license | ulan2005/api | 83aee48b97c733b7ebc9c86bbc46c70ad65ccf05 | 18df714ae70049baa37d234c35c914afc77a0d69 | refs/heads/master | 2023-08-31T23:05:44.917648 | 2021-11-13T04:20:42 | 2021-11-13T04:20:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | #!/home/tenthent/help/api/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer.cli.normalizer import cli_detect
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli_detect())
| [
"kayratsagynbekov@gmail.com"
] | kayratsagynbekov@gmail.com | |
7bde8de05c2efacea12295b8690650f7dbe05a8c | 04e15ba03e9f0075b156b27c524040884d3fdff3 | /demo_app/models.py | ddf5290e5251db3c5239e68ad0b4f96719103c0c | [] | no_license | 123abc958/demo | eb1291512fe566bb9cf760d33c544e88cd28fdf4 | 202f46c5fdb64e9fa03eb0bf1afd8ac4243f10d1 | refs/heads/master | 2023-04-12T16:58:59.445802 | 2021-05-20T09:29:22 | 2021-05-20T09:29:22 | 346,645,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # Create your models here.
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Profile(models.Model):
    """Personal profile record (one row per person)."""
    # verbose_name strings are Japanese UI labels; field names stay English
    name = models.CharField('名前', max_length=35)
    education = models.CharField('教育', max_length=35)
    github= models.CharField('GitHub', max_length=35)
    facebook= models.CharField('facebook', max_length=35)
    blog= models.CharField('blog', max_length=35)
    mail= models.CharField('mail', max_length=35)
    mobile = models.CharField('携帯', max_length=35)
    def __str__(self):
        # shown in the Django admin / shell representation
        return self.name
class Post(models.Model):
    """User-authored blog post with optional image attachment."""
    title = models.CharField('タイトル', max_length=35)
    text = models.TextField('本文')
    # image is optional; files land under MEDIA_ROOT/images
    image = models.ImageField('画像', upload_to = 'images', blank=True)
    created_at = models.DateTimeField('投稿日', default=timezone.now)
    # deleting the user cascades to their posts
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        # shown in the Django admin / shell representation
        return self.title
| [
"news@all.sakura.ne.jp"
] | news@all.sakura.ne.jp |
97f518ba5b43b776aa98dea4596d5b0c2fa1cd7b | d530a37faefd35996e9236369c9f473eec029a42 | /server.py | c78ca1d5e5fbb51e913ff33482075b9bf7877463 | [] | no_license | lildwagz/file-tranfer-server | 99ad00f332919fe8fb79275be2736215786b1130 | cf84b6534e1a52e7cbc1a2f4bc2d3a18714f7397 | refs/heads/master | 2020-09-28T02:57:54.229445 | 2019-12-08T22:49:44 | 2019-12-08T22:49:44 | 226,672,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py |
import socket
import threading
import os
def menerima(name, socket):
    """Serve one client's file request (Python 2; runs in its own thread).

    Protocol: the client sends a filename; if the file exists the server
    replies "EXISTS <size>", waits for an "OK"-prefixed confirmation, then
    streams the file in 1024-byte chunks; otherwise it replies "ERR ".
    The *name* argument is only a thread label and is not used.
    """
    filename = socket.recv(1024)
    if os.path.isfile(filename):
        # NOTE(review): socket.send may transmit fewer bytes than requested;
        # sendall would be safer -- confirm before relying on large files
        socket.send("EXISTS " + str(os.path.getsize(filename)))
        userResponse = socket.recv(1024)
        if userResponse[:2] == 'OK':
            with open(filename, 'rb') as f:
                bytesToSend = f.read(1024)
                socket.send(bytesToSend)
                # empty read marks end-of-file; the final empty send ends the loop
                while bytesToSend != "":
                    bytesToSend = f.read(1024)
                    socket.send(bytesToSend)
    else:
        socket.send("ERR ")
    socket.close()
def Main():
    """File-server entry point (Python 2): accept clients forever.

    Listens on 127.0.0.1:2112 and hands each accepted connection to
    `menerima` on its own daemon-less thread.
    """
    host = '127.0.0.1'
    port = 2112
    s = socket.socket()
    s.bind((host,port))
    s.listen(5)
    print "Server sudah siap."
    while True:
        c, addr = s.accept()
        print "client baru dari :<" + str(addr) + ">"
        t = threading.Thread(target=menerima, args=("RetrThread", c))
        t.start()
    # NOTE(review): unreachable -- the accept loop above never exits
    s.close()
| [
"zamshit123@gmail.com"
] | zamshit123@gmail.com |
0a030625e9544345a435aa71ebe26d2aeb90f1b0 | 72013fb45d20d03dc21c743d76724c12386cc88b | /tests/funfhmmer_test.py | 98fb677b43d8243fb2d8f51c9a2fc01d0c082ecd | [
"MIT"
] | permissive | shouldsee/cathpy | beefe7319f6272b73d281b39ec049d0a1231fc0e | 5f7fa1322434b2d254f0158c5840f029b12dbafe | refs/heads/master | 2021-03-14T06:03:14.119801 | 2020-01-10T21:39:33 | 2020-01-10T21:39:33 | 246,743,752 | 0 | 0 | NOASSERTION | 2020-03-12T04:31:08 | 2020-03-12T04:31:07 | null | UTF-8 | Python | false | false | 2,120 | py | import glob
import logging
import os
import re
import tempfile
from .testutils import TestBase, log_title, log_level
from cathpy.core.funfhmmer import Client, ResultResponse
import cathpy.core.error as err
LOG = logging.getLogger(__name__)
class TestFunfhmmer(TestBase):
    """Integration tests for the cathpy funfhmmer web-service client.

    NOTE(review): these tests perform live HTTP searches against the
    funfhmmer service, so they require network access.
    """
    def setUp(self):
        # fixture FASTA files shipped under tests/data
        self.test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
        self.test_fasta_file1 = os.path.join(self.test_data_dir, '2damA00.fa')
        self.test_fasta_file2 = os.path.join(
            self.test_data_dir, 'A0A0Q0Y989_9BACI.fa')
    def test_basic(self):
        # a FunFam id for this sequence should appear in every output format
        client = Client()
        response = client.search_fasta(fasta_file=self.test_fasta_file1)
        self.assertIsInstance(response, ResultResponse)
        re_expected_id = r'1.10.8.10/FF/14534'
        self.assertRegex(response.as_json(pp=True), re_expected_id)
        self.assertRegex(response.funfam_scan.as_json(), re_expected_id)
        self.assertRegex(response.funfam_scan.as_tsv(), re_expected_id)
        self.assertRegex(
            response.funfam_resolved_scan.as_tsv(), re_expected_id)
    def test_bad_url(self):
        # an unreachable base URL should surface as an HttpError
        client = Client(base_url='http://invalid.base_url.that.does.not.exist')
        with self.assertRaises(err.HttpError):
            client.search_fasta(fasta_file=self.test_fasta_file1)
    def test_no_results(self):
        # a low-complexity junk sequence should match no FunFams
        client = Client()
        with self.assertRaises(err.NoMatchesError):
            client.search_fasta(
                fasta='>test\nAAAAAAGGGGGAAAAAGGGGAAAAAAGGGGGAAAAAGGGGGAAAAGGGGGAAAAA')
    def test_resolved_results(self):
        # resolved scan should collapse to one result with two domain hits
        client = Client()
        response = client.search_fasta(fasta_file=self.test_fasta_file2)
        LOG.info("funfam_resolved_scan.as_tsv: %s",
                 response.funfam_resolved_scan.as_tsv())
        self.assertEqual(len(response.funfam_resolved_scan.results),
                         1, 'resolved_scan has one result')
        first_result = response.funfam_resolved_scan.results[0]
        self.assertEqual(len(first_result.hits), 2,
                         'first resolved_scan result has correct number of hits')
| [
"ian@sillit.com"
] | ian@sillit.com |
e077000339f79423ee4da9c789b77491ab87ac5f | fbe3a52d2dd02bec18f7f52b31e357aed192a308 | /misc/begin/exercises/def1.py | 4cbd865b23fdbc03446b3cc80ad9e332cc9a1e7d | [] | no_license | lherrada/python | 8fc5bd5ceb6038479fa6347dd6c0bd6e17f92e98 | d8260f35ba91b89590ef8e489188fb80ca1aed4e | refs/heads/master | 2022-10-29T06:23:17.297554 | 2022-09-24T15:45:59 | 2022-09-24T15:45:59 | 23,411,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | #!/usr/bin/python
def general(func,arg):
    # indirection demo: invoke *func* with a single argument (Python 2 apply)
    apply(func,(arg,))
def name(x):
    # print a greeting with the given name string
    print "Your name is " + x
def age(n):
    # print the given age; expects an int for the %d format
    print "Your age is %d" % n
print "=" * 30
# (function, argument) pairs driven two ways below: via apply() and directly
datain=[(name,"Erikita"),(age,38)]
for i,j in datain:
    apply(i,(j,))
for i,j in datain:
    i(j)
#general(name,"Erikita")
#general(age,38)
#name("Erika")
#x=name
#x("Luis")
#age(37)
| [
"lherrada@lucid64.hsd1.ca.comcast.net"
] | lherrada@lucid64.hsd1.ca.comcast.net |
964172588b7fead7f74c44940f5fcd8122c0e7e7 | 9f3cc35432661786767492d0f90edcc937280e76 | /challenges/web_scam-generator_fixed/solv/z3solver.py | f3bba307e72d4960c7cf0771c846456a324559c3 | [] | no_license | justcatthefish/justctf-2019 | b464470317da033692b1670212dc5571adaa19fe | 3bb692a386f277912ff47567c3d93098f118543b | refs/heads/master | 2021-06-28T21:58:25.588108 | 2021-06-09T05:25:20 | 2021-06-09T05:25:20 | 240,951,074 | 41 | 8 | null | 2020-05-06T15:02:29 | 2020-02-16T19:21:26 | SystemVerilog | UTF-8 | Python | false | false | 2,621 | py | #!/usr/bin/env python2
from z3 import *
import random
import sys
def solve(info, T, N, first_char='', last_char='', hints=None):
    """Reconstruct all length-N strings whose set of T-grams equals *info*.

    Uses z3 to model the unknown string as one T-gram variable block per
    position, constrained so consecutive blocks overlap by T-1 characters.

    :param info: iterable of T-character tuples/strings (the observed T-grams)
    :param T: T-gram width
    :param N: length of the original string
    :param first_char: optional known first character
    :param last_char: optional known last character
    :param hints: optional substrings known to occur somewhere in the string
    :return: list of all satisfying strings
    """
    # fix: mutable default argument (hints=[]) replaced with None sentinel
    hints = hints or []
    TT = T-1
    info = map(lambda x: tuple(x), info)
    # remove duplicates (dict.fromkeys preserves insertion order)
    info = list(dict.fromkeys(info))
    # convert each T-gram to a list of character codes
    info = list(map(lambda x: list(map(lambda y: ord(y), x)), info))
    # define symbolic variables: one block of T vars per string position
    chars = [ Int('x_%s' % i) for i in range((N-TT)*T)]
    # solution chars: first var of each block, plus the last block's suffix
    real_chars = []
    for i in range(0, len(chars), T):
        real_chars.append(chars[i])
    for i in range(TT):
        real_chars.append(chars[-TT+i])
    s = Solver()
    if first_char:
        s.add(real_chars[0] == ord(first_char))
    if last_char:
        s.add(real_chars[-1] == ord(last_char))
    # overlap constraints: each block's suffix equals the next block's prefix
    for i in range(1,len(chars)-TT,T):
        for d in range(TT):
            s.add(chars[i+d] == chars[i+d+TT])
    # each hint must appear as a contiguous substring somewhere
    for hint in hints:
        hint_len = len(hint)
        x = []
        for i in range(len(real_chars)-hint_len+1):
            x.append(And([real_chars[i+d] == ord(hint[d]) for d in range(hint_len)]))
        s.add(Or(x))
    # every block must be one of the observed T-grams
    for i in range(0,len(chars),T):
        x = []
        for j in info:
            x.append(And([chars[i+d] == j[d] for d in range(T)]))
        s.add(Or(x))
    # and every observed T-gram must be used at least once
    for j in info:
        x = []
        for i in range(0,len(chars),T):
            x.append(And([chars[i+d] == j[d] for d in range(T)]))
        s.add(Or(x))
    # enumerate all models, blocking each one to find the next
    solutions = []
    while s.check() == sat:
        m = s.model()
        solutions.append(''.join(list(map(lambda x: chr(m.evaluate(x).as_long()), real_chars))))
        s.add(Or([x != m[x] for x in chars]))
    return solutions
if __name__ == '__main__':
if len(sys.argv) != 4 or sys.argv[1] not in ['array', 'secret']:
print('python %s array "aaa,vvv,zzz" 32' % sys.argv[0])
print('python %s secret "907e5f274c6d6117040d65739df1ab5a" 3' % sys.argv[0])
sys.exit(0)
if sys.argv[1] == 'array':
info = list(map(lambda x: tuple(x.strip()), sys.argv[2].strip().split(',')))
t = len(info[0])
n = int(sys.argv[3])
if sys.argv[1] == 'secret':
secret = sys.argv[2].strip()
t = int(sys.argv[3])
n = len(secret)
info = zip(*[secret[i:] for i in range(t)])
random.shuffle(info)
print(info)
s = solve(info, t, n)
print(len(s), s[0]) | [
"dominik.b.czarnota@gmail.com"
] | dominik.b.czarnota@gmail.com |
aac18132a830b5831635d8398042176d3291a187 | 970f122ccda9c165910657e8084d78fbc3d7e544 | /mlrun/serving/states.py | 69f0913c051f6ef24ead47e9f95a18bc6b90dcda | [
"Apache-2.0"
] | permissive | kusumy/mlrun | 2a25e88af0dabd05db5e3c3a0fb3787908763291 | 683e8c514d9c26f3a7e82b1becaf7fbf1006f4f2 | refs/heads/master | 2023-07-12T10:54:32.549325 | 2021-08-23T13:58:15 | 2021-08-23T13:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,000 | py | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["TaskStep", "RouterStep", "RootFlowStep"]
import json
import os
import pathlib
import traceback
import warnings
from copy import copy, deepcopy
from inspect import getfullargspec, signature
from typing import Union
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from ..config import config
from ..datastore import get_stream_pusher
from ..errors import MLRunInvalidArgumentError
from ..model import ModelObj, ObjectDict
from ..platforms.iguazio import parse_v3io_path
from ..utils import get_class, get_function
# class_args keys starting with this prefix are resolved to callables
callable_prefix = "_"
# separator used when composing step full names (parent/child paths)
path_splitter = "/"
# sentinel presumably naming the previously added step when wiring a
# graph -- its consumer is outside this chunk, confirm against insert_step
previous_step = "$prev"
class GraphError(Exception):
    """Raised when the serving-graph topology or configuration is invalid."""
class StepKinds:
    """String constants naming the supported serving-graph step kinds."""
    router = "router"  # routing step (RouterStep.kind)
    task = "task"  # class/handler execution step (TaskStep.kind)
    flow = "flow"  # flow graph of steps
    queue = "queue"  # queue/stream step
    choice = "choice"  # conditional-branch step
    root = "root"  # graph root (RootFlowStep)
# attribute names serialized for TaskStep (presumably consumed by the
# ModelObj to_dict/from_dict machinery -- confirm in mlrun.model)
_task_step_fields = [
    "kind",
    "class_name",
    "class_args",
    "handler",
    "skip_context",
    "after",
    "function",
    "comment",
    "shape",
    "full_event",
    "on_error",
    "responder",
]
def new_model_endpoint(class_name, model_path, handler=None, **class_args):
    """Build a model-serving TaskStep for *class_name* loading *model_path*."""
    # deep-copy so the caller's kwargs dict is never mutated or shared
    init_args = deepcopy(class_args)
    init_args["model_path"] = model_path
    return TaskStep(class_name, init_args, handler=handler)
def new_remote_endpoint(url, **class_args):
    """Build a TaskStep that forwards events to a remote HTTP endpoint."""
    # deep-copy so the caller's kwargs dict is never mutated or shared
    init_args = deepcopy(class_args)
    init_args["url"] = url
    return TaskStep("$remote", init_args)
class BaseStep(ModelObj):
    """Common base for all serving-graph steps (task/router/flow/queue)."""
    kind = "BaseStep"
    default_shape = "ellipse"  # graphviz default node shape
    _dict_fields = ["kind", "comment", "after", "on_error"]
    def __init__(self, name: str = None, after: list = None, shape: str = None):
        self.name = name
        self._parent = None  # owning flow/router, linked via set_parent()
        self.comment = None
        self.context = None  # server context, injected in init_object()
        self.after = after
        self._next = None
        self.shape = shape
        self.on_error = None  # name of the error-handler step, if any
        self._on_error_handler = None  # bound in _set_error_handler()
    def get_shape(self):
        """graphviz shape"""
        return self.shape or self.default_shape
    def set_parent(self, parent):
        """set/link the step parent (flow/router)"""
        self._parent = parent
    @property
    def next(self):
        """list of downstream step names (or None)"""
        return self._next
    @property
    def parent(self):
        """step parent (flow/router)"""
        return self._parent
    def set_next(self, key: str):
        """set/insert the key as next after this step, optionally remove other keys"""
        if not self.next:
            self._next = [key]
        elif key not in self.next:
            self._next.append(key)
        return self
    def after_step(self, after):
        """specify the previous step name"""
        # most steps only accept one source
        self.after = [after] if after else []
        return self
    def after_state(self, after):
        # deprecated alias kept for backwards compatibility
        warnings.warn(
            "This method is deprecated. Use after_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.after_step(after)
    def error_handler(self, step_name: str = None, state_name=None):
        """set error handler step (on failure/raise of this step)"""
        if state_name:
            warnings.warn(
                "The state_name parameter is deprecated. Use step_name instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
        step_name = step_name or state_name
        if not step_name:
            raise MLRunInvalidArgumentError("Must specify step_name")
        self.on_error = step_name
        return self
    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """init the step class"""
        self.context = context
    def _is_local_function(self, context):
        # base steps always run locally; TaskStep overrides this
        return True
    def get_children(self):
        """get child steps (for router/flow)"""
        return []
    def __iter__(self):
        # base steps have no children to iterate
        yield from []
    @property
    def fullname(self):
        """full path/name (include parents)"""
        name = self.name or ""
        if self._parent and self._parent.fullname:
            name = path_splitter.join([self._parent.fullname, name])
        return name.replace(":", "_")  # replace for graphviz escaping
    def _post_init(self, mode="sync"):
        # hook for subclasses; called after object init
        pass
    def _set_error_handler(self):
        """init/link the error handler for this step"""
        if self.on_error:
            error_step = self.context.root.path_to_step(self.on_error)
            self._on_error_handler = error_step.run
    def _log_error(self, event, err, **kwargs):
        """on failure log (for sync mode)"""
        self.context.logger.error(
            f"step {self.name} got error {err} when processing an event:\n {event.body}"
        )
        message = traceback.format_exc()
        self.context.logger.error(message)
        self.context.push_error(
            event, f"{err}\n{message}", source=self.fullname, **kwargs
        )
    def _call_error_handler(self, event, err, **kwargs):
        """call the error handler if exist"""
        if self._on_error_handler:
            event.error = str(err)
            event.origin_state = self.fullname
            return self._on_error_handler(event)
    def path_to_step(self, path: str):
        """return step object from step relative/fullname"""
        path = path or ""
        tree = path.split(path_splitter)
        next_level = self
        for step in tree:
            if step not in next_level:
                raise GraphError(
                    f"step {step} doesnt exist in the graph under {next_level.fullname}"
                )
            next_level = next_level[step]
        return next_level
    def path_to_state(self, path: str):
        # deprecated alias kept for backwards compatibility
        warnings.warn(
            "This method is deprecated. Use path_to_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.path_to_step(path)
    def to(
        self,
        class_name: Union[str, type] = None,
        name: str = None,
        handler: str = None,
        graph_shape: str = None,
        function: str = None,
        full_event: bool = None,
        **class_args,
    ):
        """add a step right after this step and return the new step
        example, a 4 step pipeline ending with a stream:
        graph.to('URLDownloader')\
            .to('ToParagraphs')\
            .to(name='to_json', handler='json.dumps')\
            .to('>>', 'to_v3io', path=stream_path)\
        :param class_name: class name or step object to build the step from
                           for router steps the class name should start with '*'
                           for queue/stream step the class should be '>>' or '$queue'
        :param name: unique name (and path) for the child step, default is class name
        :param handler: class/function handler to invoke on run/event
        :param graph_shape: graphviz shape name
        :param function: function this step should run in
        :param full_event: this step accepts the full event (not just body)
        :param class_args: class init arguments
        """
        # a step with a `steps` attribute is itself a flow and becomes the parent
        if hasattr(self, "steps"):
            parent = self
        elif self._parent:
            parent = self._parent
        else:
            raise GraphError(
                f"step {self.name} parent is not set or its not part of a graph"
            )
        name, step = params_to_step(
            class_name,
            name,
            handler,
            graph_shape=graph_shape,
            function=function,
            full_event=full_event,
            class_args=class_args,
        )
        step = parent._steps.update(name, step)
        step.set_parent(parent)
        if not hasattr(self, "steps"):
            # check that its not the root, todo: in future may gave nested flows
            step.after_step(self.name)
        parent._last_added = step
        return step
class TaskStep(BaseStep):
"""task execution step, runs a class or handler"""
kind = "task"
_dict_fields = _task_step_fields
_default_class = ""
def __init__(
self,
class_name: Union[str, type] = None,
class_args: dict = None,
handler: str = None,
name: str = None,
after: list = None,
full_event: bool = None,
function: str = None,
responder: bool = None,
):
super().__init__(name, after)
self.class_name = class_name
self.class_args = class_args or {}
self.handler = handler
self.function = function
self._handler = None
self._object = None
self._async_object = None
self.skip_context = None
self.context = None
self._class_object = None
self.responder = responder
self.full_event = full_event
self.on_error = None
self._inject_context = False
def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
self.context = context
self._async_object = None
if not self._is_local_function(context):
# skip init of non local functions
return
if self.handler and not self.class_name:
# link to function
if callable(self.handler):
self._handler = self.handler
self.handler = self.handler.__name__
else:
self._handler = get_function(self.handler, namespace)
args = signature(self._handler).parameters
if args and "context" in list(args.keys()):
self._inject_context = True
return
if isinstance(self.class_name, type):
self._class_object = self.class_name
self.class_name = self.class_name.__name__
if not self._class_object:
if self.class_name == "$remote":
self._class_object = RemoteHttpHandler
else:
self._class_object = get_class(
self.class_name or self._default_class, namespace
)
if not self._object or reset:
# init the step class + args
class_args = {}
for key, arg in self.class_args.items():
if key.startswith(callable_prefix):
class_args[key[1:]] = get_function(arg, namespace)
else:
class_args[key] = arg
class_args.update(extra_kwargs)
# add name and context only if target class can accept them
argspec = getfullargspec(self._class_object)
if argspec.varkw or "context" in argspec.args:
class_args["context"] = self.context
if argspec.varkw or "name" in argspec.args:
class_args["name"] = self.name
try:
self._object = self._class_object(**class_args)
except TypeError as exc:
raise TypeError(
f"failed to init step {self.name}, {exc}\n args={self.class_args}"
)
# determine the right class handler to use
handler = self.handler
if handler:
if not hasattr(self._object, handler):
raise GraphError(
f"handler ({handler}) specified but doesnt exist in class {self.class_name}"
)
else:
if hasattr(self._object, "do"):
handler = "do"
elif hasattr(self._object, "do_event"):
handler = "do_event"
self.full_event = True
if handler:
self._handler = getattr(self._object, handler, None)
self._set_error_handler()
if mode != "skip":
self._post_init(mode)
def _is_local_function(self, context):
# detect if the class is local (and should be initialized)
current_function = get_current_function(context)
if current_function == "*":
return True
if not self.function and not current_function:
return True
if (
self.function and self.function == "*"
) or self.function == current_function:
return True
return False
    @property
    def async_object(self):
        """return the sync or async (storey) class instance"""
        # prefer the storey wrapper when one was built (async engine)
        return self._async_object or self._object

    def clear_object(self):
        # drop the instantiated class object (forces re-creation on next init_object)
        self._object = None

    def _post_init(self, mode="sync"):
        # give the wrapped class a chance to run its own post_init hook
        if self._object and hasattr(self._object, "post_init"):
            self._object.post_init(mode)

    def respond(self):
        """mark this step as the responder.

        step output will be returned as the flow result, no other step can follow
        """
        self.responder = True
        return self
    def run(self, event, *args, **kwargs):
        """run this step, in async flows the run is done through storey"""
        if not self._is_local_function(self.context):
            # todo invoke remote via REST call
            return event

        if self.context.verbose:
            self.context.logger.info(f"step {self.name} got event {event.body}")

        # inject context parameter if it is expected by the handler
        if self._inject_context:
            kwargs["context"] = self.context
        elif kwargs and "context" in kwargs:
            del kwargs["context"]

        try:
            if self.full_event:
                # handler receives (and returns) the whole event object
                return self._handler(event, *args, **kwargs)
            event.body = self._handler(event.body, *args, **kwargs)
        except Exception as exc:
            self._log_error(event, exc)
            handled = self._call_error_handler(event, exc)
            if not handled:
                raise exc
            # error was routed to an error handler; stop the flow for this event
            event.terminated = True
        return event
class RouterStep(TaskStep):
    """router step, implement routing logic for running child routes"""

    kind = "router"
    default_shape = "doubleoctagon"
    _dict_fields = _task_step_fields + ["routes"]
    _default_class = "mlrun.serving.ModelRouter"

    def __init__(
        self,
        class_name: Union[str, type] = None,
        class_args: dict = None,
        handler: str = None,
        routes: list = None,
        name: str = None,
        function: str = None,
    ):
        super().__init__(class_name, class_args, handler, name=name, function=function)
        self._routes: ObjectDict = None
        self.routes = routes

    def get_children(self):
        """get child steps (routes)"""
        return self._routes.values()

    @property
    def routes(self):
        """child routes/steps, traffic is routed to routes based on router logic"""
        return self._routes

    @routes.setter
    def routes(self, routes: dict):
        # normalize a plain dict into typed step objects (default kind: task)
        self._routes = ObjectDict.from_dict(classes_map, routes, "task")

    def add_route(self, key, route=None, class_name=None, handler=None, **class_args):
        """add child route step or class to the router

        :param key:        unique name (and route path) for the child step
        :param route:      child step object (Task, ..)
        :param class_name: class name to build the route step from (when route is not provided)
        :param class_args: class init arguments
        :param handler:    class handler to invoke on run/event
        """
        if not route and not class_name:
            raise MLRunInvalidArgumentError("route or class_name must be specified")
        if not route:
            route = TaskStep(class_name, class_args, handler=handler)
        route = self._routes.update(key, route)
        route.set_parent(self)
        return route

    def clear_children(self, routes: list):
        """clear child steps (routes)"""
        if not routes:
            routes = self._routes.keys()
        for key in routes:
            del self._routes[key]

    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """init the router class object and then all of its routes"""
        if not self._is_local_function(context):
            return

        self.class_args = self.class_args or {}
        # mode="skip" defers _post_init until after the routes are initialized
        super().init_object(
            context, namespace, "skip", reset=reset, routes=self._routes, **extra_kwargs
        )

        for route in self._routes.values():
            route.set_parent(self)
            route.init_object(context, namespace, mode, reset=reset)
        self._set_error_handler()
        self._post_init(mode)

    def __getitem__(self, name):
        return self._routes[name]

    def __setitem__(self, name, route):
        self.add_route(name, route)

    def __delitem__(self, key):
        del self._routes[key]

    def __iter__(self):
        yield from self._routes.keys()

    def plot(self, filename=None, format=None, source=None, **kw):
        """plot/save a graphviz plot"""
        return _generate_graphviz(
            self, _add_graphviz_router, filename, format, source=source, **kw
        )
class QueueStep(BaseStep):
    """queue step, implement an async queue or represent a stream"""

    kind = "queue"
    default_shape = "cds"
    _dict_fields = BaseStep._dict_fields + [
        "path",
        "shards",
        "retention_in_hours",
        "options",
    ]

    def __init__(
        self,
        name: str = None,
        path: str = None,
        after: list = None,
        shards: int = None,
        retention_in_hours: int = None,
        **options,
    ):
        super().__init__(name, after)
        self.path = path
        self.shards = shards
        self.retention_in_hours = retention_in_hours
        self.options = options
        self._stream = None
        self._async_object = None

    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """create the stream pusher when a stream path is configured"""
        self.context = context
        if self.path:
            self._stream = get_stream_pusher(
                self.path,
                shards=self.shards,
                retention_in_hours=self.retention_in_hours,
            )
        self._set_error_handler()

    @property
    def async_object(self):
        # unlike TaskStep, a queue has no sync fallback object
        return self._async_object

    def after_step(self, after):
        # queue steps accept multiple sources
        if self.after:
            if after:
                self.after.append(after)
        else:
            self.after = [after] if after else []
        return self

    def after_state(self, after):
        """deprecated alias of after_step"""
        warnings.warn(
            "This method is deprecated. Use after_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.after_step(after)

    def run(self, event, *args, **kwargs):
        """push the event body to the stream (sync engine path)"""
        data = event.body
        if not data:
            return event

        if self._stream:
            self._stream.push({"id": event.id, "body": data, "path": event.path})
            # event was handed off to the stream; stop the sync flow here
            event.terminated = True
            event.body = None
        return event
class FlowStep(BaseStep):
    """flow step, represent a workflow or DAG"""

    kind = "flow"
    _dict_fields = BaseStep._dict_fields + [
        "steps",
        "engine",
        "default_final_step",
    ]

    # TODO - remove once "states" is fully deprecated
    @classmethod
    def from_dict(cls, struct=None, fields=None, deprecated_fields: dict = None):
        # map deprecated "states"/"default_final_state" fields when loading a spec
        deprecated_fields = deprecated_fields or {}
        deprecated_fields.update(
            {"states": "steps", "default_final_state": "default_final_step"}
        )
        return super().from_dict(
            struct, fields=fields, deprecated_fields=deprecated_fields
        )

    def __init__(
        self,
        name=None,
        steps=None,
        after: list = None,
        engine=None,
        final_step=None,
        # TODO - remove once usage of "state" is fully deprecated
        states=None,
        final_state=None,
    ):
        super().__init__(name, after)
        if states:
            warnings.warn(
                "The states parameter is deprecated. Use steps instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
            steps = steps or states
        if final_state:
            warnings.warn(
                "The final_state parameter is deprecated. Use final_step instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
            final_step = final_step or final_state
        self._steps = None
        self.steps = steps
        self.engine = engine
        # allow starting mid-graph via env var (used when a graph spans functions)
        # TODO - remove use of START_FROM_STATE once it's fully deprecated.
        self.from_step = os.environ.get("START_FROM_STEP", None) or os.environ.get(
            "START_FROM_STATE", None
        )
        self.final_step = final_step

        self._last_added = None
        self._controller = None
        self._wait_for_result = False
        self._source = None
        self._start_steps = []
    def get_children(self):
        """get child (workflow) steps"""
        return self._steps.values()

    @property
    def steps(self):
        """child (workflow) steps"""
        return self._steps

    @property
    def states(self):
        """deprecated alias of .steps"""
        warnings.warn(
            "This property is deprecated. Use steps instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self._steps

    @property
    def controller(self):
        """async (storey) flow controller"""
        return self._controller

    @steps.setter
    def steps(self, steps):
        # normalize a plain dict into typed step objects (default kind: task)
        self._steps = ObjectDict.from_dict(classes_map, steps, "task")

    @states.setter
    def states(self, states):
        # deprecated alias setter for .steps
        warnings.warn(
            "This property is deprecated. Use steps instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        self._steps = ObjectDict.from_dict(classes_map, states, "task")
    def add_step(
        self,
        class_name=None,
        name=None,
        handler=None,
        after=None,
        before=None,
        graph_shape=None,
        function=None,
        full_event: bool = None,
        **class_args,
    ):
        """add task, queue or router step/class to the flow

        use after/before to insert into a specific location

        example:
            graph = fn.set_topology("flow", exist_ok=True)
            graph.add_step(class_name="Chain", name="s1")
            graph.add_step(class_name="Chain", name="s3", after="$prev")
            graph.add_step(class_name="Chain", name="s2", after="s1", before="s3")

        :param class_name:  class name or step object to build the step from
                            for router steps the class name should start with '*'
                            for queue/stream step the class should be '>>' or '$queue'
        :param name:        unique name (and path) for the child step, default is class name
        :param handler:     class/function handler to invoke on run/event
        :param after:       the step name this step comes after
                            can use $prev to indicate the last added step
        :param before:      string or list of next step names that will run after this step
        :param graph_shape: graphviz shape name
        :param function:    function this step should run in
        :param full_event:  pass the full event object to the handler (not only event.body)
        :param class_args:  class init arguments
        """
        name, step = params_to_step(
            class_name,
            name,
            handler,
            graph_shape=graph_shape,
            function=function,
            full_event=full_event,
            class_args=class_args,
        )
        self.insert_step(name, step, after, before)
        return step

    def insert_state(self, key, state, after, before=None):
        """deprecated alias of insert_step"""
        warnings.warn(
            "This method is deprecated. Use insert_step instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        return self.insert_step(key, state, after, before)

    def insert_step(self, key, step, after, before=None):
        """insert step object into the flow, specify before and after"""
        step = self._steps.update(key, step)
        step.set_parent(self)

        # "$prev" on the first inserted step means there is nothing to link to
        if after == "$prev" and len(self._steps) == 1:
            after = None

        previous = ""
        if after:
            if after == "$prev" and self._last_added:
                previous = self._last_added.name
            else:
                if after not in self._steps.keys():
                    raise MLRunInvalidArgumentError(
                        f"cant set after, there is no step named {after}"
                    )
                previous = after
            step.after_step(previous)

        if before:
            if before not in self._steps.keys():
                raise MLRunInvalidArgumentError(
                    f"cant set before, there is no step named {before}"
                )
            # guard against trivially creating a cycle
            if before == step.name or before == previous:
                raise GraphError(
                    f"graph loop, step {before} is specified in before and/or after {key}"
                )
            self[before].after_step(step.name)
        self._last_added = step
        return step
    def clear_children(self, steps: list = None, states: list = None):
        """remove some or all of the states, empty/None for all"""
        if states:
            warnings.warn(
                "This states parameter is deprecated. Use steps instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
            steps = steps or states
        if not steps:
            steps = self._steps.keys()
        for key in steps:
            del self._steps[key]

    def __getitem__(self, name):
        return self._steps[name]

    def __setitem__(self, name, step):
        self.add_step(name, step)

    def __delitem__(self, key):
        del self._steps[key]

    def __iter__(self):
        yield from self._steps.keys()
    def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
        """validate the graph, init all child steps, and build the async flow if needed"""
        self.context = context
        self.check_and_process_graph()

        for step in self._steps.values():
            step.set_parent(self)
            step.init_object(context, namespace, mode, reset=reset)
        self._set_error_handler()
        self._post_init(mode)

        if self.engine != "sync":
            # non-sync engines run through the storey async DAG
            self._build_async_flow()
    def check_and_process_graph(self, allow_empty=False):
        """validate correct graph layout and initialize the .next links

        returns (start step objects, default final step name, responder names)
        """
        if self.is_empty() and allow_empty:
            self._start_steps = []
            return [], None, []

        def has_loop(step, previous):
            # walk the .after links backwards; return the step name if a cycle exists
            for next_step in step.after or []:
                if next_step in previous:
                    return step.name
                downstream = has_loop(self[next_step], previous + [next_step])
                if downstream:
                    return downstream
            return None

        start_steps = []
        for step in self._steps.values():
            step._next = None
            if step.after:
                loop_step = has_loop(step, [])
                if loop_step:
                    raise GraphError(
                        f"Error, loop detected in step {loop_step}, graph must be acyclic (DAG)"
                    )
            else:
                # steps without .after are flow entry points
                start_steps.append(step.name)

        responders = []
        for step in self._steps.values():
            if hasattr(step, "responder") and step.responder:
                responders.append(step.name)
            # error handler steps are not regular entry points
            if step.on_error and step.on_error in start_steps:
                start_steps.remove(step.on_error)
            if step.after:
                # build the forward (.next) link from the backward (.after) link
                prev_step = step.after[0]
                self[prev_step].set_next(step.name)
        if self.on_error and self.on_error in start_steps:
            start_steps.remove(self.on_error)

        if (
            len(responders) > 1
        ):  # should not have multiple steps which respond to request
            raise GraphError(
                f'there are more than one responder steps in the graph ({",".join(responders)})'
            )

        if self.from_step:
            # explicit start override (START_FROM_STEP env var)
            if self.from_step not in self.steps:
                raise GraphError(
                    f"from_step ({self.from_step}) specified and not found in graph steps"
                )
            start_steps = [self.from_step]

        self._start_steps = [self[name] for name in start_steps]

        def get_first_function_step(step, current_function):
            # find the first step which belongs to the function
            if (
                hasattr(step, "function")
                and step.function
                and step.function == current_function
            ):
                return step
            for item in step.next or []:
                next_step = self[item]
                returned_step = get_first_function_step(next_step, current_function)
                if returned_step:
                    return returned_step

        current_function = get_current_function(self.context)
        if current_function and current_function != "*":
            # when running inside a specific function, start at its first step(s)
            new_start_steps = []
            for from_step in self._start_steps:
                step = get_first_function_step(from_step, current_function)
                if step:
                    new_start_steps.append(step)
            if not new_start_steps:
                raise GraphError(
                    f"did not find steps pointing to current function ({current_function})"
                )
            self._start_steps = new_start_steps

        if self.engine == "sync" and len(self._start_steps) > 1:
            raise GraphError(
                "sync engine can only have one starting step (without .after)"
            )

        default_final_step = None
        if self.final_step:
            if self.final_step not in self.steps:
                raise GraphError(
                    f"final_step ({self.final_step}) specified and not found in graph steps"
                )
            default_final_step = self.final_step
        elif len(self._start_steps) == 1:
            # find the final step in case if a simple sequence of steps
            next_obj = self._start_steps[0]
            while next_obj:
                next = next_obj.next
                if not next:
                    default_final_step = next_obj.name
                    break
                next_obj = self[next[0]] if len(next) == 1 else None

        return self._start_steps, default_final_step, responders
    def set_flow_source(self, source):
        """set the async flow (storey) source"""
        self._source = source

    def _build_async_flow(self):
        """initialize and build the async/storey DAG"""
        try:
            import storey
        except ImportError:
            raise GraphError("storey package is not installed, use pip install storey")

        def process_step(state, step, root):
            # recursively wire each step's storey object to its children
            if not state._is_local_function(self.context):
                return
            for item in state.next or []:
                next_state = root[item]
                if next_state.async_object:
                    next_step = step.to(next_state.async_object)
                    process_step(next_state, next_step, root)

        for step in self._steps.values():
            if hasattr(step, "async_object") and step._is_local_function(self.context):
                if step.kind == StepKinds.queue:
                    # mock servers with downstream steps bypass the real stream
                    skip_stream = self.context.is_mock and step.next
                    if step.path and not skip_stream:
                        stream_path = step.path
                        endpoint = None
                        if "://" in stream_path:
                            endpoint, stream_path = parse_v3io_path(step.path)
                            stream_path = stream_path.strip("/")
                        step._async_object = storey.StreamTarget(
                            storey.V3ioDriver(endpoint), stream_path
                        )
                    else:
                        # pass-through placeholder when no stream is configured
                        step._async_object = storey.Map(lambda x: x)
                elif not step.async_object or not hasattr(
                    step.async_object, "_outlets"
                ):
                    # if regular class, wrap with storey Map
                    step._async_object = storey.Map(
                        step._handler,
                        full_event=step.full_event,
                        name=step.name,
                        context=self.context,
                    )

                if not step.next and hasattr(step, "responder") and step.responder:
                    # if responder step (return result), add Complete()
                    step.async_object.to(storey.Complete(full_event=True))
                    self._wait_for_result = True

        # todo: allow source array (e.g. data->json loads..)
        source = self._source or storey.SyncEmitSource()
        for next_state in self._start_steps:
            next_step = source.to(next_state.async_object)
            process_step(next_state, next_step, self)

        for step in self._steps.values():
            # add error handler hooks
            if (step.on_error or self.on_error) and step.async_object:
                error_step = self._steps[step.on_error or self.on_error]
                step.async_object.set_recovery_step(error_step.async_object)

        self._controller = source.run()
    def get_queue_links(self):
        """return dict of function and queue its listening on, for building stream triggers"""
        links = {}
        for step in self.get_children():
            if step.kind == StepKinds.queue:
                for item in step.next or []:
                    next_step = self[item]
                    if next_step.function:
                        # a function may consume from at most one queue
                        if next_step.function in links:
                            raise GraphError(
                                f"function ({next_step.function}) cannot read from multiple queues"
                            )
                        links[next_step.function] = step
        return links

    def init_queues(self):
        """init/create the streams used in this flow"""
        for step in self.get_children():
            if step.kind == StepKinds.queue:
                step.init_object(self.context, None)

    def is_empty(self):
        """is the graph empty (no child steps)"""
        return len(self.steps) == 0

    @staticmethod
    async def _await_and_return_id(awaitable, event):
        # wait for the async emit to finish, then answer with just the event id
        await awaitable
        event = copy(event)
        event.body = {"id": event.id}
        return event
    def run(self, event, *args, **kwargs):
        """run the flow on an event; async flows go through the storey controller,
        sync flows walk the single step chain in-process"""
        if self._controller:
            # async flow (using storey)
            event._awaitable_result = None
            if config.datastore.async_source_mode == "enabled":
                resp_awaitable = self._controller.emit(
                    event, await_result=self._wait_for_result
                )
                if self._wait_for_result:
                    return resp_awaitable
                # no responder: acknowledge with the event id only
                return self._await_and_return_id(resp_awaitable, event)
            else:
                resp = self._controller.emit(
                    event, return_awaitable_result=self._wait_for_result
                )
                if self._wait_for_result and resp:
                    # wait for the result of the responder step
                    return resp.await_result()
                event = copy(event)
                event.body = {"id": event.id}
                return event

        if len(self._start_steps) == 0:
            return event
        next_obj = self._start_steps[0]
        while next_obj:
            try:
                event = next_obj.run(event, *args, **kwargs)
            except Exception as exc:
                self._log_error(event, exc, failed_step=next_obj.name)
                handled = self._call_error_handler(event, exc)
                if not handled:
                    raise exc
                event.terminated = True
                return event

            if hasattr(event, "terminated") and event.terminated:
                return event
            next = next_obj.next
            if next and len(next) > 1:
                raise GraphError(
                    f"synchronous flow engine doesnt support branches use async, step={next_obj.name}"
                )
            next_obj = self[next[0]] if next else None
        return event
    def wait_for_completion(self):
        """wait for completion of run in async flows"""
        if self._controller:
            if hasattr(self._controller, "terminate"):
                self._controller.terminate()
            return self._controller.await_termination()

    def plot(self, filename=None, format=None, source=None, targets=None, **kw):
        """plot/save graph using graphviz"""
        return _generate_graphviz(
            self,
            _add_graphviz_flow,
            filename,
            format,
            source=source,
            targets=targets,
            **kw,
        )
class RootFlowStep(FlowStep):
    """root flow step"""

    kind = "root"
    _dict_fields = ["steps", "engine", "final_step", "on_error"]

    # TODO - remove once "final_state" is fully deprecated
    @classmethod
    def from_dict(cls, struct=None, fields=None):
        # map the deprecated "final_state" field onto "final_step" when loading
        return super().from_dict(
            struct, fields=fields, deprecated_fields={"final_state": "final_step"}
        )
# shared HTTP adapter: retry transient server errors (5xx) up to 3 times with backoff
http_adapter = HTTPAdapter(
    max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
)
class RemoteHttpHandler:
    """class for calling remote endpoints"""

    def __init__(self, url):
        self.url = url
        self.format = "json"
        self._session = requests.Session()
        # retry transient 5xx errors on both schemes
        self._session.mount("http://", http_adapter)
        self._session.mount("https://", http_adapter)

    def do_event(self, event):
        """forward the event over HTTP and load the response back into event.body"""
        kwargs = {}
        kwargs["headers"] = event.headers or {}
        method = event.method or "POST"
        if method != "GET":
            if isinstance(event.body, (str, bytes)):
                kwargs["data"] = event.body
            else:
                kwargs["json"] = event.body

        url = self.url.strip("/") + event.path
        try:
            resp = self._session.request(method, url, verify=False, **kwargs)
        except OSError as err:
            raise OSError(f"error: cannot run function at url {url}, {err}")
        if not resp.ok:
            raise RuntimeError(f"bad function response {resp.text}")

        data = resp.content
        # fix: the original mixed `or`/`and` without parentheses (json parsing should
        # apply when EITHER the configured format or the content-type is json), and
        # indexing a missing content-type header raised KeyError; use .get instead
        if (
            self.format == "json"
            or resp.headers.get("content-type") == "application/json"
        ) and isinstance(data, (str, bytes)):
            data = json.loads(data)
        event.body = data
        return event
# step kind name -> implementing class, used when deserializing graph specs
classes_map = {
    "task": TaskStep,
    "router": RouterStep,
    "flow": FlowStep,
    "queue": QueueStep,
}
def get_current_function(context):
    """Return the name of the function currently executing, or "" when unknown."""
    current = getattr(context, "current_function", None) if context else None
    return current or ""
def _add_graphviz_router(graph, step, source=None, **kwargs):
    """render a router step and its routes into the given graphviz graph"""
    if source:
        graph.node("_start", source.name, shape=source.shape, style="filled")
        graph.edge("_start", step.fullname)

    graph.node(step.fullname, label=step.name, shape=step.get_shape())
    for route in step.get_children():
        graph.node(route.fullname, label=route.name, shape=route.get_shape())
        graph.edge(step.fullname, route.fullname)
def _add_graphviz_flow(
    graph, step, source=None, targets=None,
):
    """render a flow (DAG) step and its children into the given graphviz graph"""
    start_steps, default_final_step, responders = step.check_and_process_graph(
        allow_empty=True
    )
    graph.node("_start", source.name, shape=source.shape, style="filled")
    for start_step in start_steps:
        graph.edge("_start", start_step.fullname)
    for child in step.get_children():
        kind = child.kind
        if kind == StepKinds.router:
            # routers are drawn as a sub-cluster containing their routes
            with graph.subgraph(name="cluster_" + child.fullname) as sg:
                _add_graphviz_router(sg, child)
        else:
            graph.node(child.fullname, label=child.name, shape=child.get_shape())
        after = child.after or []
        for item in after:
            previous_object = step[item]
            # edges leaving a router cluster need the ltail attribute
            kw = (
                {"ltail": "cluster_" + previous_object.fullname}
                if previous_object.kind == StepKinds.router
                else {}
            )
            graph.edge(previous_object.fullname, child.fullname, **kw)
        if child.on_error:
            # error-handler links are drawn dashed
            graph.edge(child.fullname, child.on_error, style="dashed")

    # draw targets after the last step (if specified)
    if targets:
        for target in targets or []:
            graph.node(target.fullname, label=target.name, shape=target.get_shape())
            last_step = target.after or default_final_step
            if last_step:
                graph.edge(last_step, target.fullname)
def _generate_graphviz(
    step, renderer, filename=None, format=None, source=None, targets=None, **kw,
):
    """shared plotting helper: build a Digraph, render via `renderer`, optionally save"""
    try:
        from graphviz import Digraph
    except ImportError:
        raise ImportError(
            'graphviz is not installed, run "pip install graphviz" first!'
        )
    graph = Digraph("mlrun-flow", format="jpg")
    graph.attr(compound="true", **kw)
    source = source or BaseStep("start", shape="egg")
    renderer(graph, step, source=source, targets=targets)
    if filename:
        # derive the output format from the filename suffix when not given
        suffix = pathlib.Path(filename).suffix
        if suffix:
            filename = filename[: -len(suffix)]
            format = format or suffix[1:]
        format = format or "png"
        graph.render(filename, format=format)
    return graph
def graph_root_setter(server, graph):
    """set graph root object from class or dict"""
    if graph:
        if isinstance(graph, dict):
            kind = graph.get("kind")
        elif hasattr(graph, "kind"):
            kind = graph.kind
        else:
            raise MLRunInvalidArgumentError("graph must be a dict or a valid object")

        if kind == StepKinds.router:
            server._graph = server._verify_dict(graph, "graph", RouterStep)
        elif not kind or kind == StepKinds.root:
            # missing kind defaults to a root flow graph
            server._graph = server._verify_dict(graph, "graph", RootFlowStep)
        else:
            raise GraphError(f"illegal root step {kind}")
def get_name(name, class_name):
    """Resolve a task name: an explicit name wins, otherwise the class supplies it."""
    if name:
        return name
    if not class_name:
        raise MLRunInvalidArgumentError("name or class_name must be provided")
    # a class object contributes its type name; a string is used verbatim
    return class_name.__name__ if isinstance(class_name, type) else class_name
def params_to_state(
    class_name,
    name,
    handler=None,
    graph_shape=None,
    function=None,
    full_event=None,
    class_args=None,
):
    """deprecated alias of params_to_step"""
    warnings.warn(
        "This method is deprecated. Use param_to_step instead",
        # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
        PendingDeprecationWarning,
    )
    return params_to_step(
        class_name, name, handler, graph_shape, function, full_event, class_args
    )
def params_to_step(
    class_name,
    name,
    handler=None,
    graph_shape=None,
    function=None,
    full_event=None,
    class_args=None,
):
    """return step object from provided params or classes/objects

    returns a (name, step) tuple
    """
    if class_name and hasattr(class_name, "to_dict"):
        # an existing step/state object was passed; clone it through its dict form
        struct = class_name.to_dict()
        kind = struct.get("kind", StepKinds.task)
        name = name or struct.get("name", struct.get("class_name"))
        cls = classes_map.get(kind, RootFlowStep)
        step = cls.from_dict(struct)
        step.function = function
        step.full_event = full_event

    elif class_name and class_name in [">>", "$queue"]:
        # queue/stream step markers
        if "path" not in class_args:
            raise MLRunInvalidArgumentError(
                "path=<stream path or None> must be specified for queues"
            )
        if not name:
            raise MLRunInvalidArgumentError("queue name must be specified")
        step = QueueStep(name, **class_args)

    elif class_name and class_name.startswith("*"):
        # a leading "*" marks a router step
        routes = class_args.get("routes", None)
        class_name = class_name[1:]
        name = get_name(name, class_name or "router")
        step = RouterStep(
            class_name, class_args, handler, name=name, function=function, routes=routes
        )

    elif class_name or handler:
        name = get_name(name, class_name)
        step = TaskStep(
            class_name,
            class_args,
            handler,
            name=name,
            function=function,
            full_event=full_event,
        )
    else:
        raise MLRunInvalidArgumentError("class_name or handler must be provided")

    if graph_shape:
        step.shape = graph_shape
    return name, step
| [
"noreply@github.com"
] | noreply@github.com |
cc9883e5b55518c5f51112fba077922ad9c5be72 | f7600adc88cb08c2fdb067dcdd42f6916aa89a66 | /src/Copy of GraphColoringSimulation.py | 9403eb756ba3a3f834eea25e664382443779532a | [] | no_license | ofraam/GraphColoringMIPs | 0b1003e9d34ffc937cb8d426206a64d4c3275efe | 048ca01d66f1b1d4e7e9e2213764f98f81c806f7 | refs/heads/master | 2021-01-22T01:04:59.570512 | 2015-08-28T21:35:14 | 2015-08-28T21:35:14 | 37,035,554 | 0 | 1 | null | 2015-08-23T16:19:14 | 2015-06-07T23:01:56 | Python | UTF-8 | Python | false | false | 19,550 | py | '''
Created on Jun 7, 2015
@author: Ofra
'''
import networkx as nx
import math
import numpy as np
import random
from GraphProblem import GraphProblem
from Agent import Agent
import copy
from MIP import Mip
from Utils import Action
from Utils import Result
from Utils import Session
import csv
import Baselines
from Baselines import RandomSystem, MostChangedSystem,\
MostChangedInIntervalSystem, LatestChangedSystem
'''
Class that runs the simulation
@overlap defines how many agents control each node (lamda for poisson distribution so not all nodes are the same)
@systems is a list of system objects (i.e., algorithms to compare)
'''
class Simulation:
    """Runs a distributed graph-coloring simulation and compares information-
    sharing algorithms (the ``systems``) on the same initial problem."""

    def __init__(self, graph, numAgents, numColors, systems, overlap = 1,
                 maxIterations = 1000, actionLimit = 3, queryLimit = 5,
                 weightInc = 1.0, setting = "all",
                 numNodesPerCluster = 5, pWithin = 0.5, pBetween = 0.1):
        """Set up the problem instance, clusters and agent/node assignments.

        :param graph: networkx graph defining the coloring problem structure
        :param numAgents: number of agents (also used as the cluster count)
        :param numColors: number of available colors
        :param systems: list of system objects (algorithms to compare)
        :param overlap: Poisson lambda for how many agents control each node
        :param maxIterations: stop condition when the CSP is not solved
        :param actionLimit: max actions per agent per turn
        :param queryLimit: max nodes a system returns per query
        :param weightInc: weight increment attached to each action
        :param setting: "all" shares all info; otherwise only ranking changes
        :param numNodesPerCluster: Poisson lambda for nodes per cluster
        :param pWithin: edge probability inside a cluster
        :param pBetween: edge probability between clusters

        Bugfix: numNodesPerCluster/pWithin/pBetween previously had no default
        values after defaulted parameters, which is a SyntaxError; defaults
        were added to keep existing callers working.
        """
        # generate graph structure
        self.graph = graph
        # assign random colors
        for node, data in self.graph.nodes(data=True):
            data['color'] = random.randint(0, numColors - 1)
        # create graphProblem object
        self.colors = list(range(numColors))
        problemGraph = copy.deepcopy(self.graph)
        self.instance = GraphProblem(problemGraph, self.colors)
        self.numAgents = numAgents
        self.overlap = overlap
        self.systems = systems
        self.setting = setting
        self.solved = False  # is the CSP problem solved (to terminate simulation)
        self.numIterations = 0
        self.maxIterations = maxIterations
        self.actionLimit = actionLimit
        self.queryLimit = queryLimit
        self.weightIncOfAction = weightInc
        # create agents objects
        self.agents = []
        self.clusters = {}
        self.nodeToClusterIndex = {}
        # NOTE(review): numAgents doubles as the cluster count here - one
        # cluster per agent; confirm this is the intended mapping
        self.generateClusteredGraph(numAgents, numNodesPerCluster, pWithin, pBetween)
        # assign nodes to agents
        agentAssignments = self.assignAgents()
        print(agentAssignments)
        self.agentAssignments = agentAssignments
    def resetSystems(self, newSystems):
        """Replace the list of algorithms (systems) to compare on subsequent runs."""
        self.systems = newSystems
def assignAgentsByClusters(self):
def assignAgents(self):
agentAssignments = {}
agentIds = []
for agent in range(self.numAgents):
agentAssignments[agent] = []
agentIds.append(agent)
for i in self.graph.nodes():
numAgentsControllingNode = np.random.poisson(self.overlap) #draw from distribution
numAgentsControllingNode = max(1,numAgentsControllingNode) #can't have a node that is not controlled by any agent
numAgentsControllingNode = min(self.numAgents,numAgentsControllingNode) #can't have more than the number of agents controlling a node
assignedAgents = random.sample(agentIds, numAgentsControllingNode)
for a in assignedAgents:
agentAssignments[a].append(i)
return agentAssignments
def generateClusteredGraph(self,numClusters,nodesPerCluster,pEdgeIn,pEdgeBet):
g = nx.Graph()
totalNodeCount = 0
for clust in numClusters:
numNodesInClust = max(np.random.poisson(nodesPerCluster),2)
clusterNodes = [] #TODO: verify doesn't mess up other lists
for node in numNodesInClust:
g.add_node(totalNodeCount)
clusterNodes.append(totalNodeCount)
self.nodeToClusterIndex[totalNodeCount]=clust
totalNodeCount=totalNodeCount+1
self.clusters[clust] = clusterNodes
#add edges
for i in range(totalNodeCount):
for j in range(i+1,totalNodeCount):
if self.nodeToClusterIndex[i]==self.nodeToClusterIndex[j]:
if np.random.rand()<pEdgeIn:
g.add_edge(i,j)
else:
if np.random.rand()<pEdgeBet:
g.add_edge(i, j)
    def runSimulation(self, outputFilename, graphName, run = 0):
        """Run every algorithm in self.systems on the same initial problem and
        append the per-session status rows to a csv file.

        :param outputFilename: csv file the status rows are appended to
        :param graphName: label written into each result row
        :param run: run index (the csv header is written only when run == 0)
        """
        #store results
        results = []
        #save initial state to revert for each system
        #run each system
        for system in self.systems:
            # snapshot the problem so each algorithm starts from the same state
            initialProblem = copy.deepcopy(self.instance)
            self.agents = [] #reset agents
            for agent,nodes in self.agentAssignments.iteritems():
                newAgent = Agent(agent,nodes,copy.deepcopy(self.graph), self.colors,self.actionLimit, reset = False)
                self.agents.append(newAgent)
            print 'starting to run algorithm: '+str(system)
            while ((self.solved == False) & (self.numIterations<self.maxIterations)):
                if self.numIterations > 50:
                    a = 0  # NOTE(review): leftover debugger breakpoint hook, has no effect
                for agent in self.agents: #agents iterate in round robin. #TODO: in future, consider non-uniform session
                    if self.setting == "all":
                        nodesToShare = system.query(agent.id, self.queryLimit) #get nodes info to share with agent. nodesToShare is list of nodes
                    else: #only ranking changes, need to send first rev to consider
                        nodesToShare = system.query(agent.id, self.queryLimit, startRev = agent.lastRevision+1)
                    info = {} #dict holding nodes and their colors (to share with agent)
                    for node in nodesToShare:
                        info[node] = self.instance.getColor(node)
                    agent.updateBelief(info) #update agents knowledge
                    actions = agent.chooseActions(self.numIterations,minActions = 3) #query agent for actions
#                    print actions
                    #send update to system
                    actionObjs = []
                    for node,col in actions:
                        actionObj = Action(agent.id, node, 'sigEdit', col, self.weightIncOfAction, 1.0)
                        actionObjs.append(actionObj)
                    session = Session(agent.id, actionObjs, self.numIterations, nodesToShare)
                    system.update(session) #send info back to system
                    #save status (snapshot is taken before applying this agent's actions)
                    res = {}
                    res['graphName'] = graphName
                    res['algorithm'] = system
                    res['iteration'] = self.numIterations
                    state = self.instance.getGraphState()
                    res['conflicts'] = state['conflicts']
                    res['unknown'] = state['unknown']
                    res['notConflicts'] = state['notConflicts']
                    res['percentColored'] = self.instance.getPercentColored()
                    res['run'] = run
                    results.append(res)
                    #send update to GraphProblem
                    self.instance.updateGraph(actions)
#                    filename = "../graphPlots1/"+str(system)+"_"+str(self.numIterations)
#                    self.instance.drawGraph(filename)
                    #increment num of iterations (each agent session counts as one)
                    self.numIterations = self.numIterations + 1
            #save results
#            res = Result(system, self.numIterations, self.instance.getGraphState(), self.instance.getPercentColored())
#            results[system] = res
            #revert graph and restart iterations counter
            self.instance = initialProblem
            self.numIterations = 0
            print 'finished running algorithm: '+str(system)

        #save results to file (append mode so multiple runs share one csv)
        with open(outputFilename, 'ab') as csvfile:
            fieldnames = ['graphName', 'algorithm', 'iteration', 'conflicts','unknown','notConflicts','percentColored','run']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            if run == 0:
                writer.writeheader()
            for res in results:
                writer.writerow(res)
def runGraph9nodes():
    """Run one simulation of the MIP system on a fixed, hand-built 10-node graph.

    Results are appended to ../results/testingStuff6.csv under the graph
    name '9graph'.
    """
    # All candidate recommender systems are constructed up front; only the
    # MIP system is actually exercised in this experiment.
    randSys = RandomSystem(setting = "changes")
    mostChanged = MostChangedInIntervalSystem(200)
    mostChangeInt = MostChangedInIntervalSystem(5)
    latestSys = LatestChangedSystem()
    mip = Mip()
    systems = [mip]
    # Hard-coded topology of the 10-node benchmark graph.
    benchmark_edges = [
        (0, 1), (0, 5), (0, 6), (0, 7),
        (1, 2), (1, 7), (1, 9),
        (2, 3), (2, 9),
        (3, 4), (3, 7), (3, 9),
        (4, 5), (4, 6), (4, 7), (4, 8),
        (5, 6),
        (6, 7), (6, 8),
        (7, 8), (7, 9),
    ]
    graph = nx.Graph()
    graph.add_nodes_from(range(10))
    graph.add_edges_from(benchmark_edges)
    sim = Simulation(graph, 3, 3, systems, overlap = 2, maxIterations = 200,
                     actionLimit = 3, queryLimit = 5, weightInc = 1.0,
                     setting = "changes")
    sim.runSimulation("../results/testingStuff6.csv", "9graph")
def frange(start, stop, step=1.0):
    """Generator version of range() that accepts float arguments.

    Yields start, start+step, start+2*step, ... for as long as the value
    stays strictly below stop.  Subject to ordinary floating-point drift.
    """
    value = start
    while value < stop:
        yield value
        value += step
if __name__ == '__main__':
    # Batch experiment driver (Python 2 syntax: bare `print` statements).
    # NOTE(review): this script uses deliberate `a = 1/0` divisions as crude
    # "stop here" markers -- everything after the first one that executes is
    # unreachable and is kept only as a record of earlier experiments.
    # runGraph9nodes()
    # a = 1/0
    # systems = []
    # randSys = RandomSystem(setting = "changes")
    # mostChanged = MostChangedInIntervalSystem(200) #essentially all revisions...
    # mostChangeInt = MostChangedInIntervalSystem(5)
    # latestSys = LatestChangedSystem()
    # systems.append(randSys)
    # systems.append(mostChanged)
    # mip = Mip()
    # systems.append(mip)
    # systems.append(mostChangeInt)
    # systems.append(latestSys)
    # graph = nx.fast_gnp_random_graph(20, 0.6)
    ## graph = nx.watts_strogatz_graph(20, 5, 0.7)
    # graphName = 'random_20_06'
    # filename= '../results/'+graphName+"__queryLimit3_agents3_overlap2.csv"
    # for i in range(10):
    # graphName = graphName+"_"+str(i)
    # sim = Simulation(graph, 3, 3, systems, overlap = 2, maxIterations = 200, actionLimit = 3, queryLimit = 3, weightInc = 1.0)
    # sim.runSimulation(filename,graphName)
    # systems = []
    # randSys = RandomSystem()
    # mostChanged = MostChangedSystem()
    # mostChangeInt = MostChangedInIntervalSystem(5)
    # latestSys = LatestChangedSystem()
    # systems.append(randSys)
    # systems.append(mostChanged)
    # mip = Mip()
    # systems.append(mip)
    # systems.append(mostChangeInt)
    # systems.append(latestSys)
    #
    # Active experiment: sweep random G(n, p) graphs over sizes 20..50 (step 5)
    # and edge probabilities 0.05..0.25 (step 0.05), for 2..5 agents each.
    numNodes = 20
    numAgents = 2
    p = 0.1
    for numNodes in range(20,51,5):
        for p in frange(0.05,0.3,0.05):
            graph = nx.fast_gnp_random_graph(numNodes,p)
            graphName = 'random_'+str(numNodes)+"_"+str(p)
            for numAgents in range(2,6):
                # Fresh set of recommender systems for each configuration;
                # all five are compared on the same graph.
                systems = []
                randSys = RandomSystem(setting = "changes")
                mostChanged = MostChangedInIntervalSystem(200) #essentially all revisions...
                mostChangeInt = MostChangedInIntervalSystem(5)
                latestSys = LatestChangedSystem()
                systems.append(randSys)
                systems.append(mostChanged)
                mip = Mip()
                systems.append(mip)
                systems.append(mostChangeInt)
                systems.append(latestSys)
                sim = Simulation(graph, numAgents, 3, systems, overlap = 2, maxIterations = 150, actionLimit = 5, queryLimit = 5, weightInc = 1.0, setting = "changes")
                # Snapshot the systems so each of the 5 repeat runs starts
                # from identical system state.
                systemsBeforeRun = copy.deepcopy(systems)
                filename= '../results/'+graphName+"onlyBetaMIP_200iter_colored_changes_minAction3__queryLimit5_actionLimit5_agents"+str(numAgents)+"_overlap2.csv"
                for i in range(5):
                    sim.runSimulation(filename,graphName, run = i)
                    sim.resetSystems(systemsBeforeRun)
    # graph = nx.fast_gnp_random_graph(20, 0.6)
    print 'done'
    # Intentional crash used as an early-exit marker: nothing below runs.
    a = 1/0
    # ---- dead code below: earlier experiment configurations ----
    graph = nx.fast_gnp_random_graph(30, 0.1)
    # graph = nx.watts_strogatz_graph(20, 5, 0.7)
    graphName = 'random_30_01'
    filename= '../results/'+graphName+"onlyBetaMIP_150iter_colored_changes_minAction0__queryLimit5_actionLimit3_agents3_overlap2.csv"
    for i in range(5):
        systems = []
        randSys = RandomSystem(setting = "changes")
        mostChanged = MostChangedInIntervalSystem(200) #essentially all revisions...
        mostChangeInt = MostChangedInIntervalSystem(5)
        latestSys = LatestChangedSystem()
        systems.append(randSys)
        # systems.append(mostChanged)
        mip = Mip()
        systems.append(mip)
        # systems.append(mostChangeInt)
        # systems.append(latestSys)
        graphName = graphName+"_"+str(i)
        sim = Simulation(graph, 3, 3, systems, overlap = 2, maxIterations = 150, actionLimit = 3, queryLimit = 5, weightInc = 1.0, setting = "changes")
        sim.runSimulation(filename,graphName, run = i)
    # graph = nx.fast_gnp_random_graph(20, 0.6)
    print 'done'
    # return
    # Watts-Strogatz small-world experiment (setting "all").
    graphName = 'watts_strogatz_graph_20_5_05'
    filename= '../results/'+graphName+"__all2_actionMin0_queryLimit5_actionLimit5_agents5_overlap2.csv"
    for i in range(1):
        systems = []
        randSys = RandomSystem(setting = "all")
        mostChanged = MostChangedInIntervalSystem(200)
        mostChangeInt = MostChangedInIntervalSystem(5)
        latestSys = LatestChangedSystem()
        systems.append(randSys)
        systems.append(mostChanged)
        mip = Mip()
        systems.append(mip)
        systems.append(mostChangeInt)
        systems.append(latestSys)
        graph = nx.watts_strogatz_graph(20, 5, 0.5)
        graphName = graphName+"_"+str(i)
        sim = Simulation(graph, 5, 3, systems, overlap = 2, maxIterations = 200, actionLimit = 5, queryLimit = 5, weightInc = 1.0, setting = "all")
        sim.runSimulation(filename,graphName)
    # Second intentional crash marker; the remaining sections are older
    # sweeps over larger Watts-Strogatz and binomial graphs.
    a = 1/0
    systems = []
    randSys = RandomSystem()
    mostChanged = MostChangedSystem()
    mostChangeInt = MostChangedInIntervalSystem(5)
    latestSys = LatestChangedSystem()
    systems.append(randSys)
    systems.append(mostChanged)
    mip = Mip()
    systems.append(mip)
    systems.append(mostChangeInt)
    systems.append(latestSys)
    graph = nx.watts_strogatz_graph(30, 7, 0.5)
    graphName = 'watts_strogatz_graph_30_7_05'
    filename= '../results/'+graphName+"__queryLimit3_agents5_overlap2.csv"
    for i in range(10):
        systems = []
        randSys = RandomSystem()
        mostChanged = MostChangedSystem()
        mostChangeInt = MostChangedInIntervalSystem(5)
        latestSys = LatestChangedSystem()
        systems.append(randSys)
        systems.append(mostChanged)
        mip = Mip()
        systems.append(mip)
        systems.append(mostChangeInt)
        systems.append(latestSys)
        graphName = graphName+"_"+str(i)
        sim = Simulation(graph, 5, 3, systems, overlap = 2, maxIterations = 200, actionLimit = 3, queryLimit = 3, weightInc = 1.0)
        sim.runSimulation(filename,graphName)
    systems = []
    randSys = RandomSystem()
    mostChanged = MostChangedSystem()
    mostChangeInt = MostChangedInIntervalSystem(5)
    latestSys = LatestChangedSystem()
    systems.append(randSys)
    systems.append(mostChanged)
    mip = Mip()
    systems.append(mip)
    systems.append(mostChangeInt)
    systems.append(latestSys)
    graph = nx.watts_strogatz_graph(40, 7, 0.5)
    graphName = 'watts_strogatz_graph_40_7_05'
    filename= '../results/'+graphName+"__queryLimit3_agents5_overlap2.csv"
    for i in range(10):
        systems = []
        randSys = RandomSystem()
        mostChanged = MostChangedSystem()
        mostChangeInt = MostChangedInIntervalSystem(5)
        latestSys = LatestChangedSystem()
        systems.append(randSys)
        systems.append(mostChanged)
        mip = Mip()
        systems.append(mip)
        systems.append(mostChangeInt)
        systems.append(latestSys)
        graphName = graphName+"_"+str(i)
        sim = Simulation(graph, 5, 3, systems, overlap = 2, maxIterations = 200, actionLimit = 3, queryLimit = 3, weightInc = 1.0)
        sim.runSimulation(filename,graphName)
    systems = []
    randSys = RandomSystem()
    mostChanged = MostChangedSystem()
    mostChangeInt = MostChangedInIntervalSystem(5)
    latestSys = LatestChangedSystem()
    systems.append(randSys)
    systems.append(mostChanged)
    mip = Mip()
    systems.append(mip)
    systems.append(mostChangeInt)
    systems.append(latestSys)
    graph = nx.binomial_graph(20, 0.5)
    graphName = 'binomial_20_05'
    filename= '../results/'+graphName+"__queryLimit3_agents5_overlap2.csv"
    for i in range(10):
        systems = []
        randSys = RandomSystem()
        mostChanged = MostChangedSystem()
        mostChangeInt = MostChangedInIntervalSystem(5)
        latestSys = LatestChangedSystem()
        systems.append(randSys)
        systems.append(mostChanged)
        mip = Mip()
        systems.append(mip)
        systems.append(mostChangeInt)
        systems.append(latestSys)
        graphName = graphName+"_"+str(i)
        sim = Simulation(graph, 5, 3, systems, overlap = 2, maxIterations = 200, actionLimit = 3, queryLimit = 3, weightInc = 1.0)
        sim.runSimulation(filename,graphName)
    systems = []
    randSys = RandomSystem()
    mostChanged = MostChangedSystem()
    mostChangeInt = MostChangedInIntervalSystem(5)
    latestSys = LatestChangedSystem()
    systems.append(randSys)
    systems.append(mostChanged)
    mip = Mip()
    systems.append(mip)
    systems.append(mostChangeInt)
    systems.append(latestSys)
    graph = nx.binomial_graph(30, 0.5)
    graphName = 'binomial_30_05'
    filename= '../results/'+graphName+"__queryLimit3_agents5_overlap2.csv"
    for i in range(10):
        systems = []
        randSys = RandomSystem()
        mostChanged = MostChangedSystem()
        mostChangeInt = MostChangedInIntervalSystem(5)
        latestSys = LatestChangedSystem()
        systems.append(randSys)
        systems.append(mostChanged)
        mip = Mip()
        systems.append(mip)
        systems.append(mostChangeInt)
        systems.append(latestSys)
        graphName = graphName+"_"+str(i)
        sim = Simulation(graph, 5, 3, systems, overlap = 2, maxIterations = 200, actionLimit = 3, queryLimit = 3, weightInc = 1.0)
        sim.runSimulation(filename,graphName)
| [
"oamir@seas.harvard.edu"
] | oamir@seas.harvard.edu |
a7a1cf9050f6f1d891dfedac7544ea04da552c41 | 502b40993cf50937c972f661bd6768ca607967b9 | /videosummarise.py | 4f16dc8c52da7ade5fbff035a0ab42aa523269f9 | [] | no_license | akshayvit/video-summarisation | 90dd2d7c93347f085e431b5201a2dd81dcc2e633 | 80658912c052f8bb852d4e802a8a283a0f13a565 | refs/heads/master | 2020-09-10T14:37:05.214918 | 2019-11-14T14:55:50 | 2019-11-14T14:55:50 | 221,722,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,490 | py | from __future__ import division
import cv2
import numpy as np
import os
import mahotas
import librosa
from math import *
import imageio
imageio.plugins.ffmpeg.download()
from moviepy.editor import *
from keras.models import model_from_json
from keras.applications.resnet50 import ResNet50
from keras.layers import Flatten, Input
from keras.models import Model
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
# Genre labels emitted by the classifiers: model output index i maps to
# GENRE_LIST[i].  The placeholder names suggest the real labels were never
# filled in -- TODO confirm against the trained model.
GENRE_LIST = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
def create_summary(filename, regions):
    """Cut the given regions out of a video and splice them into one clip.

    Args:
        filename: path to the source video file.
        regions: iterable of (start, end) offsets in seconds; pairs given
            in reversed order are silently swapped.

    Returns:
        A moviepy clip consisting of the concatenated sub-clips.
    """
    source = VideoFileClip(filename)
    pieces = []
    for start, end in regions:
        # Tolerate regions specified back-to-front.
        if start > end:
            start, end = end, start
        pieces.append(source.subclip(start, end))
    return concatenate_videoclips(pieces)
def get_summary(filename, regions):
    """Render the summary of ``filename`` next to it as ``<base>_1.mp4``.

    Always returns True (rendering errors propagate as exceptions).
    """
    summary = create_summary(filename, regions)
    base, _ext = os.path.splitext(filename)
    summary.to_videofile("{0}_1.mp4".format(base), codec="libx264",
                         temp_audiofile="temp.m4a", remove_temp=True,
                         audio_codec="aac")
    return True
def getknapSack(W, wt, val, n):
    """Solve the 0/1 knapsack problem and return the chosen item indices.

    Args:
        W: integer knapsack capacity.
        wt: item weights (length >= n).
        val: item values (length >= n).
        n: number of items to consider (items 0..n-1).

    Returns:
        List of selected item indices, in decreasing index order.

    Fixes vs. the original: the reconstruction loop ran ``range(n, 1, -1)``
    and therefore could never select item 0; the ``abs()`` applied to the
    remaining capacity masked that bug.  Leftover debug prints removed.
    """
    # Classic DP table: K[i][w] = best value achievable with the first i
    # items and capacity w.  Row/column 0 stay at their zero initialisation.
    K = [[0 for w in range(W + 1)] for i in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(W + 1):
            if wt[i - 1] <= w:
                K[i][w] = max(val[i - 1] + K[i - 1][w - wt[i - 1]],
                              K[i - 1][w])
            else:
                K[i][w] = K[i - 1][w]
    # Walk the table backwards to recover which items were taken.
    segments = []
    res = K[n][W]
    w = W
    for i in range(n, 0, -1):  # down to i == 1 so item 0 is considered too
        if res <= 0:
            break
        if res == K[i - 1][w]:
            # Same value without item i-1, so it was not part of the optimum.
            continue
        segments.append(i - 1)
        res -= val[i - 1]
        w -= wt[i - 1]
    return segments
def fd_hu_moments(image):
    """Return the 7 Hu invariant moments of a BGR image as a flat vector."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.HuMoments(cv2.moments(gray)).flatten()
def fd_haralick(image):
    """Return Haralick texture features of a BGR image, averaged over the
    four co-occurrence directions."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return mahotas.features.haralick(gray).mean(axis=0)
def fd_histogram(image, mask=None):
    """Return a flattened, normalised 8x8x8 HSV colour histogram.

    Note: the ``mask`` parameter is accepted for interface compatibility
    but is not applied (the histogram is always computed unmasked, as in
    the original implementation).
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1, 2], None, [8, 8, 8],
                        [0, 256, 0, 256, 0, 256])
    cv2.normalize(hist, hist)
    return hist.flatten()
def extract_image_features(frame, vector_size=32):
    """Build a global feature vector for one video frame.

    Concatenates the HSV colour histogram, Haralick texture features and
    Hu moments of ``frame`` into a single 1-D array.

    Args:
        frame: BGR image array to describe.
        vector_size: accepted for interface compatibility; unused by the
            current descriptor set.

    Returns:
        1-D numpy array of stacked descriptors.

    Bug fix: the original body read the module-level ``image`` variable
    (leaked from the top-level script) instead of the ``frame`` argument.
    """
    fv_hu_moments = fd_hu_moments(frame)
    fv_haralick = fd_haralick(frame)
    fv_histogram = fd_histogram(frame)
    return np.hstack([fv_histogram, fv_haralick, fv_hu_moments])
def extract_audio_features(y,sr):
timeseries_length = 2
features = np.zeros((1, timeseries_length , 33), dtype=np.float64)
mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=512, n_mfcc=13)
spectral_center = librosa.feature.spectral_centroid(y=y, sr=sr, hop_length=512)
chroma = librosa.feature.chroma_stft(y=y, sr=sr, hop_length=512)
spectral_contrast = librosa.feature.spectral_contrast(y=y, sr=sr, hop_length=512)
features[0, :, 0:13] = mfcc.T[0:timeseries_length, :]
features[0, :, 13:14] = spectral_center.T[0:timeseries_length, :]
features[0, :, 14:26] = chroma.T[0:timeseries_length, :]
features[0, :, 26:33] = spectral_contrast.T[0:timeseries_length, :]
return features
def load_model(model_path, weights_path):
    """Rebuild a compiled Keras model from a JSON architecture file and a
    separate weights file.

    Args:
        model_path: path to the model-architecture JSON.
        weights_path: path to the HDF5 weights file.

    Returns:
        The compiled Keras model (categorical crossentropy / adam / accuracy).
    """
    with open(model_path, 'r') as model_file:
        model = model_from_json(model_file.read())
    model.load_weights(weights_path)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
def get_genre_audio(model, y, sr):
    """Classify an audio chunk and return its label from GENRE_LIST."""
    scores = model.predict(extract_audio_features(y, sr))
    return GENRE_LIST[np.argmax(scores)]
def get_genre_image(model, res):
    """Classify a video frame and return its label from GENRE_LIST.

    The image feature vector is coerced (via np.resize) into the
    (1, 2, 33) shape the audio-trained model expects before scoring.
    """
    feats = extract_image_features(res)
    reshaped = np.resize(feats, (2, 33))
    model_input = np.reshape(reshaped, (1, 2, 33))
    scores = model.predict(model_input)
    return GENRE_LIST[np.argmax(scores)]
# Top-level driver: classify per-frame audio chunks of a hard-coded video,
# pick the "best" segments with a knapsack, and render a summary clip.
cap = cv2.VideoCapture(r"E:\\python3\\posit.mp4")
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
file_name = r"positaudio.mp3"
audio_time_series, sample_rate = librosa.load(file_name)
length_series = len(audio_time_series)
# Audio samples per video frame (float).
subdiv=(length_series/length)
#print(subdiv)
audio,imag=[],[]
model=load_model("model.json","model_weights.h5")
# Classify each frame-sized audio chunk.
for segment in range(0,length_series,int(subdiv)):
    y=audio_time_series[segment:segment+int(subdiv)]
    audio.append(get_genre_audio(model,y,sample_rate))
# Drain the video stream; per-frame image classification is disabled below.
success = 1
while success:
    success, image = cap.read()
    #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #imag.append(get_genre_image(model,gray))
# Each chunk "costs" one chunk-duration; value = genre-frequency percentage.
time=[]
for elm in range(len(audio)):
    time.append(subdiv)
W=5000
prob=[]
for i in range(len(audio)):
    prob.append(int(audio.count(audio[i])*100/len(audio)))
# NOTE(review): arguments look swapped relative to getknapSack(W, wt, val, n)
# -- prob is passed as weights and the float durations as values; the float
# "weights" would be used as list indices.  Verify intent.
segments=getknapSack(int(W),prob,time,len(audio))
clips=sorted(segments)
result=[]
last_cons,last=clips[0],clips[0]
# NOTE(review): this merge loop never advances `last` and the inner `while`
# can spin forever once its condition holds; consecutive-run merging looks
# unfinished.
for i in range(1,len(clips)):
    while(clips[i]==last_cons+1):
        last_cons=clips[i]
result.append((last,last_cons))
get_summary(r"E:\\python3\\posit.mp4",result)
| [
"noreply@github.com"
] | noreply@github.com |
4708b963711f432dc94a1be9f0ea5c419e40ab34 | 9174535da20e9329dc2d6330f8e2ed79c2eeb39d | /predict_poetry/web_test_poem.py | 91e116d6b4ead3ef0de4a429f706876a46647946 | [
"MIT"
] | permissive | ASE-couplet/azure_server_data | 9833b91095d559c4c0076829d1e44188c9d3745f | 363c78d4fcb59226df68a11bec24df573af715f6 | refs/heads/master | 2020-04-10T05:02:55.839765 | 2018-12-07T06:46:23 | 2018-12-07T06:46:23 | 160,816,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import argparse
import sys
import random
from plan import Planner
from predict import Seq2SeqPredictor
from match import MatchUtil
import sys
sys.path.append("/home/site/wwwroot/predict_poetry")
import tensorflow as tf
class Main_Poetry_maker:
    """Poem generator pipeline.

    Plans keywords from the user's input, generates candidate poem lines
    with a seq2seq model, and regenerates until the rhyme check passes.
    """
    def __init__(self):
        # Stage 1: keyword planning from the raw input phrase.
        self.planner = Planner()
        # Stage 2: seq2seq generation of poem lines from the keywords.
        self.predictor = Seq2SeqPredictor()
        # Stage 3: rhyme evaluation of the generated lines.
        self.Judge = MatchUtil()
    def predict(self, input_ustr):
        """Generate a poem for ``input_ustr``; returns the lines joined by
        newlines."""
        keywords = self.planner.plan(input_ustr.strip())
        lines = self.predictor.predict(keywords)
        # Keep regenerating while the evaluator explicitly rejects the
        # candidate (comparison against False preserved from the original).
        while self.Judge.eval_rhyme(lines) == False:
            lines = self.predictor.predict(keywords)
        return '\n'.join(lines)
def parse_arguments(argv):
    """Parse command-line arguments.

    Args:
        argv: argument list, normally ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with a single attribute ``Input`` (str,
        defaults to the empty string).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--Input',
        type=str,
        help='input from users',
        default='',
    )
    return parser.parse_args(argv)
if __name__ == '__main__':
    # CLI entry point: read --Input from the command line, build the
    # generation pipeline, then run predict() on the input (next line).
    input_ustr = parse_arguments(sys.argv[1:]).Input
    maker = Main_Poetry_maker()
maker.predict(input_ustr) | [
"v-yufxu@microsoft.com"
] | v-yufxu@microsoft.com |
58574ad0dad5911c3bfcbe11a05877544c674def | 3f327d2654b85b922909925b9f475315d78f4652 | /Backend/lib/python3.6/site-packages/twilio/rest/api/v2010/account/message/media.py | 17f63437924a2838c56dc60c9b02c116a651dff3 | [
"MIT"
] | permissive | brianwang1217/SelfImprovementWebApp | 8db45914027537aee9614f9d218c93cc08dc90f8 | 7892fc4ee5434307b74b14257b29a5f05a0a0dd7 | refs/heads/master | 2022-12-13T15:01:08.595735 | 2018-06-23T04:46:06 | 2018-06-23T04:46:06 | 137,548,289 | 1 | 1 | MIT | 2022-05-25T01:28:29 | 2018-06-16T02:48:52 | Python | UTF-8 | Python | false | false | 14,742 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class MediaList(ListResource):
    """List resource for the Media records attached to a single Message."""
    def __init__(self, version, account_sid, message_sid):
        """
        Initialize the MediaList
        :param Version version: Version that contains the resource
        :param account_sid: The unique sid that identifies this account
        :param message_sid: A string that uniquely identifies this message
        :returns: twilio.rest.api.v2010.account.message.media.MediaList
        :rtype: twilio.rest.api.v2010.account.message.media.MediaList
        """
        super(MediaList, self).__init__(version)
        # Path Solution
        self._solution = {
            'account_sid': account_sid,
            'message_sid': message_sid,
        }
        self._uri = '/Accounts/{account_sid}/Messages/{message_sid}/Media.json'.format(**self._solution)
    def stream(self, date_created_before=values.unset, date_created=values.unset,
               date_created_after=values.unset, limit=None, page_size=None):
        """
        Streams MediaInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.
        :param datetime date_created_before: Filter by date created
        :param datetime date_created: Filter by date created
        :param datetime date_created_after: Filter by date created
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.message.media.MediaInstance]
        """
        # Translate user limit/page_size into concrete paging parameters.
        limits = self._version.read_limits(limit, page_size)
        page = self.page(
            date_created_before=date_created_before,
            date_created=date_created,
            date_created_after=date_created_after,
            page_size=limits['page_size'],
        )
        return self._version.stream(page, limits['limit'], limits['page_limit'])
    def list(self, date_created_before=values.unset, date_created=values.unset,
             date_created_after=values.unset, limit=None, page_size=None):
        """
        Lists MediaInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.
        :param datetime date_created_before: Filter by date created
        :param datetime date_created: Filter by date created
        :param datetime date_created_after: Filter by date created
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.message.media.MediaInstance]
        """
        # Eagerly drain the stream() generator into a list.
        return list(self.stream(
            date_created_before=date_created_before,
            date_created=date_created,
            date_created_after=date_created_after,
            limit=limit,
            page_size=page_size,
        ))
    def page(self, date_created_before=values.unset, date_created=values.unset,
             date_created_after=values.unset, page_token=values.unset,
             page_number=values.unset, page_size=values.unset):
        """
        Retrieve a single page of MediaInstance records from the API.
        Request is executed immediately
        :param datetime date_created_before: Filter by date created
        :param datetime date_created: Filter by date created
        :param datetime date_created_after: Filter by date created
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50
        :returns: Page of MediaInstance
        :rtype: twilio.rest.api.v2010.account.message.media.MediaPage
        """
        # 'DateCreated<' / 'DateCreated>' are the wire-level names of the
        # before/after filters expected by the REST API.
        params = values.of({
            'DateCreated<': serialize.iso8601_datetime(date_created_before),
            'DateCreated': serialize.iso8601_datetime(date_created),
            'DateCreated>': serialize.iso8601_datetime(date_created_after),
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })
        response = self._version.page(
            'GET',
            self._uri,
            params=params,
        )
        return MediaPage(self._version, response, self._solution)
    def get_page(self, target_url):
        """
        Retrieve a specific page of MediaInstance records from the API.
        Request is executed immediately
        :param str target_url: API-generated URL for the requested results page
        :returns: Page of MediaInstance
        :rtype: twilio.rest.api.v2010.account.message.media.MediaPage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )
        return MediaPage(self._version, response, self._solution)
    def get(self, sid):
        """
        Constructs a MediaContext
        :param sid: Fetch by unique media Sid
        :returns: twilio.rest.api.v2010.account.message.media.MediaContext
        :rtype: twilio.rest.api.v2010.account.message.media.MediaContext
        """
        return MediaContext(
            self._version,
            account_sid=self._solution['account_sid'],
            message_sid=self._solution['message_sid'],
            sid=sid,
        )
    def __call__(self, sid):
        """
        Constructs a MediaContext
        :param sid: Fetch by unique media Sid
        :returns: twilio.rest.api.v2010.account.message.media.MediaContext
        :rtype: twilio.rest.api.v2010.account.message.media.MediaContext
        """
        # Same as get(): enables the media_list(sid) shorthand.
        return MediaContext(
            self._version,
            account_sid=self._solution['account_sid'],
            message_sid=self._solution['message_sid'],
            sid=sid,
        )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.MediaList>'
class MediaPage(Page):
    """One page of Media results; builds MediaInstance objects from payloads."""
    def __init__(self, version, response, solution):
        """
        Initialize the MediaPage
        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param account_sid: The unique sid that identifies this account
        :param message_sid: A string that uniquely identifies this message
        :returns: twilio.rest.api.v2010.account.message.media.MediaPage
        :rtype: twilio.rest.api.v2010.account.message.media.MediaPage
        """
        super(MediaPage, self).__init__(version, response)
        # Path Solution
        # (parent path parameters, kept for building instances later)
        self._solution = solution
    def get_instance(self, payload):
        """
        Build an instance of MediaInstance
        :param dict payload: Payload response from the API
        :returns: twilio.rest.api.v2010.account.message.media.MediaInstance
        :rtype: twilio.rest.api.v2010.account.message.media.MediaInstance
        """
        return MediaInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
            message_sid=self._solution['message_sid'],
        )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.MediaPage>'
class MediaContext(InstanceContext):
    """Context addressing a single Media record; supports fetch() and delete()."""
    def __init__(self, version, account_sid, message_sid, sid):
        """
        Initialize the MediaContext
        :param Version version: Version that contains the resource
        :param account_sid: The account_sid
        :param message_sid: The message_sid
        :param sid: Fetch by unique media Sid
        :returns: twilio.rest.api.v2010.account.message.media.MediaContext
        :rtype: twilio.rest.api.v2010.account.message.media.MediaContext
        """
        super(MediaContext, self).__init__(version)
        # Path parameters identifying this record; also used for the URI.
        self._solution = {'account_sid': account_sid, 'message_sid': message_sid, 'sid': sid}
        self._uri = '/Accounts/{account_sid}/Messages/{message_sid}/Media/{sid}.json'.format(**self._solution)
    def delete(self):
        """
        Deletes the MediaInstance
        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete('delete', self._uri)
    def fetch(self):
        """
        Fetch a MediaInstance
        :returns: Fetched MediaInstance
        :rtype: twilio.rest.api.v2010.account.message.media.MediaInstance
        """
        payload = self._version.fetch(
            'GET',
            self._uri,
            params=values.of({}),
        )
        return MediaInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
            message_sid=self._solution['message_sid'],
            sid=self._solution['sid'],
        )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Api.V2010.MediaContext {}>'.format(details)
class MediaInstance(InstanceResource):
    """A single Media record attached to a Message."""
    def __init__(self, version, payload, account_sid, message_sid, sid=None):
        """
        Initialize the MediaInstance
        :returns: twilio.rest.api.v2010.account.message.media.MediaInstance
        :rtype: twilio.rest.api.v2010.account.message.media.MediaInstance
        """
        super(MediaInstance, self).__init__(version)
        # Marshaled Properties
        self._properties = {
            'account_sid': payload['account_sid'],
            'content_type': payload['content_type'],
            'date_created': deserialize.rfc2822_datetime(payload['date_created']),
            'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
            'parent_sid': payload['parent_sid'],
            'sid': payload['sid'],
            'uri': payload['uri'],
        }
        # Context
        # Built lazily by _proxy; sid falls back to the payload's sid.
        self._context = None
        self._solution = {
            'account_sid': account_sid,
            'message_sid': message_sid,
            'sid': sid or self._properties['sid'],
        }
    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context
        :returns: MediaContext for this MediaInstance
        :rtype: twilio.rest.api.v2010.account.message.media.MediaContext
        """
        # Lazily build and cache the context used by delete()/fetch().
        if self._context is None:
            self._context = MediaContext(
                self._version,
                account_sid=self._solution['account_sid'],
                message_sid=self._solution['message_sid'],
                sid=self._solution['sid'],
            )
        return self._context
    @property
    def account_sid(self):
        """
        :returns: The unique sid that identifies this account
        :rtype: unicode
        """
        return self._properties['account_sid']
    @property
    def content_type(self):
        """
        :returns: The default mime-type of the media
        :rtype: unicode
        """
        return self._properties['content_type']
    @property
    def date_created(self):
        """
        :returns: The date this resource was created
        :rtype: datetime
        """
        return self._properties['date_created']
    @property
    def date_updated(self):
        """
        :returns: The date this resource was last updated
        :rtype: datetime
        """
        return self._properties['date_updated']
    @property
    def parent_sid(self):
        """
        :returns: The unique id of the resource that created the media.
        :rtype: unicode
        """
        return self._properties['parent_sid']
    @property
    def sid(self):
        """
        :returns: A string that uniquely identifies this media
        :rtype: unicode
        """
        return self._properties['sid']
    @property
    def uri(self):
        """
        :returns: The URI for this resource
        :rtype: unicode
        """
        return self._properties['uri']
    def delete(self):
        """
        Deletes the MediaInstance
        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()
    def fetch(self):
        """
        Fetch a MediaInstance
        :returns: Fetched MediaInstance
        :rtype: twilio.rest.api.v2010.account.message.media.MediaInstance
        """
        return self._proxy.fetch()
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Api.V2010.MediaInstance {}>'.format(context)
| [
"talk2shreyas@gmail.com"
] | talk2shreyas@gmail.com |
c232565a21f5fd047d7623a39c6b072c1a8a23e5 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/HUAWEI-MUSA-MA5100-MIB.py | 4fef2a9becd67f938eaf9afaece57bb5779b3134 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 102,545 | py | #
# PySNMP MIB module HUAWEI-MUSA-MA5100-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-MUSA-MA5100-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:44:44 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
musa, = mibBuilder.importSymbols("HUAWEI-MIB", "musa")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, NotificationType, ObjectIdentity, Gauge32, Counter32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, TimeTicks, iso, MibIdentifier, Unsigned32, Integer32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "NotificationType", "ObjectIdentity", "Gauge32", "Counter32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "TimeTicks", "iso", "MibIdentifier", "Unsigned32", "Integer32", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class DisplayString(OctetString):
    """Locally re-declared DisplayString textual convention.

    Intentionally shadows the DisplayString imported from SNMPv2-TC above;
    pysmi emits this redefinition when the source MIB declares its own
    DisplayString type. Behaviorally it is a plain OctetString.
    """
# --- OID registration roots for HUAWEI-MUSA-MA5100-MIB ---
# 1.3.6.1.4.1.2011 is the Huawei enterprise arc (cf. the `musa` import above).
hwMa5100Mib = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5))
# System-management subtree and the device-level node beneath it.
hwMusaSysMib = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1))
hwMusaDevice = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1))
# Sentinel node marking the end of this MIB's OID space.
hwMusaEndOfMib = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 100))
# --- Device-level scalars: date/time, CPU load, host software version, reset ---
# Each object is registered under hwMusaDevice (…2011.2.6.5.1.1); the
# setDescription() text is the authoritative MIB DESCRIPTION clause.
hwMusaSysDate = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 9), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSysDate.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSysDate.setDescription('the system date,include year,month,day.')
hwMusaSysTime = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 10), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSysTime.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSysTime.setDescription('the system time,include hours,minutes,seconds.')
hwMusaSysCpuRatio = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSysCpuRatio.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSysCpuRatio.setDescription('the musa system cpu ratio.')
hwMusaHostVersion = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaHostVersion.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaHostVersion.setDescription('the musa system host software version.')
# Write-only trigger: setting this scalar resets the device.
hwMusaResetSys = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 13), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaResetSys.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaResetSys.setDescription('Reset Musa device.')
# --- Management Ethernet addressing (IP / mask / gateway / MAC) ---
hwMusaIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 14), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpAddr.setDescription("The Musa device's Ethernet IP address.")
hwMusaIpMask = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 15), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaIpMask.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpMask.setDescription('The Musa IP SubNet Mask.')
hwMusaGatewayIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 16), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaGatewayIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaGatewayIpAddr.setDescription("The Musa gateway's IP address.")
hwMusaMacAddr = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 17), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaMacAddr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaMacAddr.setDescription('The Musa Mac address.')
# --- Access-control table: IP ranges PERMITTED to manage the device ---
# Indexed by hwMusaIpPermitTableId; rows are managed via the add/del/modify/query
# operation column rather than RowStatus (SMIv1-era style).
hwMusaIpAddrPermitTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 18), )
if mibBuilder.loadTexts: hwMusaIpAddrPermitTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpAddrPermitTable.setDescription('This table contains IP Addr scope that Musa device permit access.')
hwMusaIpAddrPermitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 18, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaIpPermitTableId"))
if mibBuilder.loadTexts: hwMusaIpAddrPermitEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpAddrPermitEntry.setDescription('This list contains IP addr scope.')
hwMusaIpPermitTableId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 18, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaIpPermitTableId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpPermitTableId.setDescription('The IP Address table ID that permit access the Musa device.')
hwMusaIpAddrPermitOper = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 18, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("add", 0), ("del", 1), ("modify", 2), ("query", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaIpAddrPermitOper.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpAddrPermitOper.setDescription('The operation that permit access the Musa device.')
hwMusaPermitBeginIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 18, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaPermitBeginIp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPermitBeginIp.setDescription('The begin IP Address that permit access the Musa device.')
hwMusaPermitEndIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 18, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaPermitEndIp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPermitEndIp.setDescription('The end Ip address that permit access the Musa device.')
hwMusaPermitIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 18, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaPermitIpMask.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPermitIpMask.setDescription('The Ip address subnet mask that permit access the Musa device.')
# --- Access-control table: IP ranges REJECTED from managing the device ---
# Mirror image of the permit table above (same column layout, node .19).
hwMusaIpAddrRejectTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 19), )
if mibBuilder.loadTexts: hwMusaIpAddrRejectTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpAddrRejectTable.setDescription('This table contains IP Addr scope that Musa device reject access.')
hwMusaIpAddrRejectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 19, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaIpRejectTableId"))
if mibBuilder.loadTexts: hwMusaIpAddrRejectEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpAddrRejectEntry.setDescription('This list contains IP addr scope.')
hwMusaIpRejectTableId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 19, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaIpRejectTableId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpRejectTableId.setDescription('The IP Address table ID that reject access the Musa device.')
hwMusaIpAddrRejectOper = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 19, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("add", 0), ("del", 1), ("modify", 2), ("query", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaIpAddrRejectOper.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaIpAddrRejectOper.setDescription('The operation that reject access the Musa device.')
hwMusaRejectBeginIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 19, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaRejectBeginIp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaRejectBeginIp.setDescription('The begin IP Address that reject access the Musa device.')
hwMusaRejectEndIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 19, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaRejectEndIp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaRejectEndIp.setDescription('The end Ip address that reject access the Musa device.')
hwMusaRejectIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 19, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaRejectIpMask.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaRejectIpMask.setDescription('The Ip address subnet mask that reject access the Musa device.')
# --- Remaining device scalars: ATM-side IP, MTU, bandwidth, chassis type ---
hwMusaAtmIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 20), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaAtmIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAtmIpAddr.setDescription("The Musa device's IP address.")
hwMusaAtmIpMask = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 21), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaAtmIpMask.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAtmIpMask.setDescription('The Musa IP SubNet Mask.')
hwMusaMtu = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 22), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaMtu.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaMtu.setDescription('The Musa Mtu value.')
hwMusaOpticConvergentRate = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 23), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaOpticConvergentRate.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaOpticConvergentRate.setDescription('The bandwidth convergentrate.')
hwMusaCellbusID = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ma5100", 1), ("ma5103", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaCellbusID.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCellbusID.setDescription('The Cellbus ID.')
# Write-only trigger for resetting the standby (slave) MMX board.
hwMusaResetSlaveMMX = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("loaddata", 1), ("noloaddata", 2)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaResetSlaveMMX.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaResetSlaveMMX.setDescription('Reset SLAVE MMX.')
hwMusaBiosVersion = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 26), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaBiosVersion.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaBiosVersion.setDescription('MMX Bios Version.')
hwMusaEthernetFirewall = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaEthernetFirewall.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaEthernetFirewall.setDescription('MMX ethernet firewall switch.')
# --- NMS PVC configuration table (…5.1.3) ---
# One row per management PVC toward the NMS; indexed by hwMusaNmsPvcIndex.
# Note the column sub-ids are sparse (5, 7, 13 absent) — preserved from the MIB.
hwMusaNmsPvcConfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3), )
if mibBuilder.loadTexts: hwMusaNmsPvcConfTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsPvcConfTable.setDescription('Musa Nms PVC configuration table.')
hwMusaNmsPvcConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaNmsPvcIndex"))
if mibBuilder.loadTexts: hwMusaNmsPvcConfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsPvcConfEntry.setDescription('This list contains Musa Nms Pvc Configuration parameters and variables.')
hwMusaNmsPvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 1), Integer32())
if mibBuilder.loadTexts: hwMusaNmsPvcIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsPvcIndex.setDescription('Nms Pvc index.')
# VPI/VCI of the relay PVC carrying management traffic.
hwMusaNmsRelayVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsRelayVpi.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsRelayVpi.setDescription('Nms Relay Pvc Vpi index.')
hwMusaNmsRelayVci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsRelayVci.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsRelayVci.setDescription('Nms Relay Pvc Vci index.')
hwMusaNmsIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsIp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsIp.setDescription('Nms IP address.')
# Row control column: writing add(0)/del(1) creates or removes the PVC.
hwMusaNmsPvcOper = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("add", 0), ("del", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsPvcOper.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsPvcOper.setDescription('Nms operate state,include:add/del.')
hwMusaNmsRxTraffic = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsRxTraffic.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsRxTraffic.setDescription("Nms PVC's receive traffic index.")
hwMusaNmsTxTraffic = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsTxTraffic.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsTxTraffic.setDescription("Nms PVC's transmit traffic index.")
hwMusaNmsSarVci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsSarVci.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsSarVci.setDescription("Nms PVC's SAR VCI")
# Encapsulation options: LLC vs VC multiplexing, and IPoA vs RFC1483 bridged.
hwMusaNmsLLCVC = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("llc", 1), ("vc", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsLLCVC.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsLLCVC.setDescription('1483B encapsulation state,include:llc/vc.')
hwMusaNmsENCAP = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("eipoa", 0), ("e1483B", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsENCAP.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsENCAP.setDescription('Nms encapsulation type,include:ipoa/1483B.')
# Destination frame/slot/VLAN the management PVC terminates on.
hwMusaNmsFrameId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 14), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsFrameId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsFrameId.setDescription("Nms PVC's DstFrameId.")
hwMusaNmsSlotId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 15), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsSlotId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsSlotId.setDescription("Nms PVC's DstSlotId.")
hwMusaNmsPortVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 3, 1, 16), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsPortVlanId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsPortVlanId.setDescription("Nms PVC's DstPortVlanId.")
# --- NMS parameter configuration table (…5.1.5) ---
# Per-NMS SNMP settings (community strings, ports, status); up to 8 NMS
# entries, indexed by hwMusaNmsID (0..7).
hwMusaNmsParaConfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5), )
if mibBuilder.loadTexts: hwMusaNmsParaConfTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsParaConfTable.setDescription('Musa Nms PVC configuration table.')
hwMusaNmsParaConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaNmsID"))
if mibBuilder.loadTexts: hwMusaNmsParaConfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsParaConfEntry.setDescription('This list contains Musa Nms Para Configuration parameters and variables.')
hwMusaNmsID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7)))
if mibBuilder.loadTexts: hwMusaNmsID.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsID.setDescription('Nms ID,from 0 to 7.')
# Row control column: add/del/modify/active/deactive the NMS entry.
hwMusaNmsOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 4, 5))).clone(namedValues=NamedValues(("add", 0), ("del", 1), ("modify", 2), ("active", 4), ("deactive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsOperState.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsOperState.setDescription('Nms operate state,include:add/del/modify/act/deact.')
hwMusaNmsName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsName.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsName.setDescription('Nms name.')
hwMusaNmsIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsIpAddr.setDescription('Nms IP address.')
# SNMP community strings and UDP ports used when talking to this NMS.
hwMusaGetCommunity = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaGetCommunity.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaGetCommunity.setDescription('Snmp get community name.')
hwMusaSetCommunity = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSetCommunity.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSetCommunity.setDescription('Snmp set community name.')
hwMusaTrapPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaTrapPort.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTrapPort.setDescription('Snmp trap port.')
hwMusaGetSetPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaGetSetPort.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaGetSetPort.setDescription('Snmp get/set port.')
hwMusaNmsStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("active", 1), ("deactive", 2), ("commfail", 3), ("uninstall", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaNmsStatus.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsStatus.setDescription('Nms status.')
hwMusaNmsStyle = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 5, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("bandin", 0), ("bandout", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaNmsStyle.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNmsStyle.setDescription('Nms admin style in-band or out-band.')
hwMusaSlotGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6))
hwMusaShelf = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 1))
hwMusaFrame = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2))
hwMusaSlot = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3))
hwMusaShelfNumber = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaShelfNumber.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaShelfNumber.setDescription('Musa shelf numbers.')
hwMusaShelfConfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 1, 2), )
if mibBuilder.loadTexts: hwMusaShelfConfTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaShelfConfTable.setDescription('This table contains Musa slot configuration parameters, one entry per Musa slot.')
hwMusaShelfConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 1, 2, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaShelfIndex"))
if mibBuilder.loadTexts: hwMusaShelfConfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaShelfConfEntry.setDescription('This list contains Musa shelf configuration parameters and status variables.')
hwMusaShelfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 1, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: hwMusaShelfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaShelfIndex.setDescription('the Musa shelf number. ')
hwMusaShelfType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("empty", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaShelfType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaShelfType.setDescription('The Musa shelf type.')
hwMusaShelfName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 1, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaShelfName.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaShelfName.setDescription('The Musa shelf name.')
hwMusaFrameNumbers = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaFrameNumbers.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFrameNumbers.setDescription('The frame numbers belong to this shelf.')
hwMusaFrameNumber = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaFrameNumber.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFrameNumber.setDescription('Musa frame numbers.')
hwMusaFrameConfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2, 2), )
if mibBuilder.loadTexts: hwMusaFrameConfTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFrameConfTable.setDescription('This table contains Musa Frame configuration parameters.')
hwMusaFrameConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2, 2, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaShelfIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaFrameIndex"))
if mibBuilder.loadTexts: hwMusaFrameConfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFrameConfEntry.setDescription('This list contains Musa Frame configuration parameters and status variables.')
hwMusaFrameIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: hwMusaFrameIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFrameIndex.setDescription('The Musa Frame number.')
hwMusaFrameType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("empty", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaFrameType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFrameType.setDescription('The Musa Frame type.')
hwMusaFrameName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaFrameName.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFrameName.setDescription('The Musa Frame name.')
hwMusaSlotNumbers = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotNumbers.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotNumbers.setDescription('The Slot numbers belong to this frame.')
hwMusaFrameBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaFrameBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFrameBandWidth.setDescription("The bandwidth(kpbs) of frame's backplane.")
hwMusaFrameUsedBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 2, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaFrameUsedBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFrameUsedBandWidth.setDescription("The current used bandwidth(kpbs) of frame's backplane.")
hwMusaSlotNumber = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotNumber.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotNumber.setDescription('Musa slot numbers.')
# --- Slot configuration table (…5.1.6.3.2) ---
# Per-board information and control, indexed by (hwMusaFrameIndex,
# hwMusaSlotIndex): board type/version/state, board bandwidth budget, and
# write-only trigger columns for software load / backup operations.
hwMusaSlotConfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2), )
if mibBuilder.loadTexts: hwMusaSlotConfTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotConfTable.setDescription('This table contains Musa slot configuration parameters, one entry per Musa slot.')
hwMusaSlotConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaFrameIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSlotIndex"))
if mibBuilder.loadTexts: hwMusaSlotConfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotConfEntry.setDescription('This list contains Musa slot configuration parameters and status variables.')
hwMusaSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15)))
if mibBuilder.loadTexts: hwMusaSlotIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotIndex.setDescription('the Musa slot number.')
# Board type; the DESCRIPTION below documents the 16-bit board/subboard
# encoding used by the device firmware.
hwMusaSlotCardType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 19, 21, 25))).clone(namedValues=NamedValues(("null", 0), ("mmx", 1), ("smx", 2), ("adl", 3), ("lanb", 4), ("lana", 5), ("cesa", 6), ("cesb", 7), ("spl", 8), ("fra", 9), ("adlb", 10), ("unknown", 11), ("splb", 12), ("sep", 13), ("smxa", 14), ("smxb", 15), ("pots", 16), ("splc", 18), ("lan", 19), ("adlc", 21), ("adld", 25)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotCardType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotCardType.setDescription('the Musa slot card type. The whole 16 bits is distributed to two parts. The whole low 8 bits is the type of board. In the high 8 bits, the whole high 4 bits is the type of the first subboard, the whole low 4 bits is the type of the second subboard. The subboard types of all kinds of board are listed below: To mmx: 0x00MMX_NO_SUBBOARD 0x01MMX_OIM_S155SM: 0x02MMX_OIM_S155MM: 0x03MMX_OIM_D155SM: 0x04MMX_OIM_D155MM: 0x05MMX_APON_R: 0x06MMX_APON_C: 0x07: MMX_IMA_TYPE 0x08: MMX_IMA_TYPE_120 To frc: 0x00FRC_NON_SUBBOARD 0x01FRC_E1 0x02FRC_T1 0x03FRC_V35 0x04: FRC_E1_120 To cesc: 0x00CESC_NON_SUBBOARD 0x01CESC_E1 0x02CESC_T1 0x03CESC_V35 0x04: CESC_E1_120 To aiua: 0x00AIU_NO_SUBBOARD 0x01AIU_155M_SINGLE_PATH 0x02AIU_155M_SINGLE_PATH_M 0x03AIU_155M_DOUBLE_PATH 0x04AIU_155M_DOUBLE_PATH_M 0x05AIU_622M 0x06AIU_APON_C 0x07AIU_APON_R 0x08AIU_E3 0x09: AIU_IMA 0x0a: AIU_IMA_120')
hwMusaSlotCardSerial = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotCardSerial.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotCardSerial.setDescription('The Musa slot card serial.')
hwMusaSlotCardVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotCardVersion.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotCardVersion.setDescription('The Musa slot card version.')
hwMusaSlotIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSlotIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotIpAddress.setDescription('The Musa slot card Ip address.')
# Board state (read-only) and board operation trigger (write-only).
hwMusaSlotCardAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13))).clone(namedValues=NamedValues(("noinstall", 0), ("normal", 1), ("fault", 2), ("mainnormal", 3), ("mainfault", 4), ("baknormal", 5), ("bakfault", 6), ("forbid", 7), ("config", 8), ("online", 10), ("none", 11), ("commok", 12), ("commfail", 13)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotCardAdminStatus.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotCardAdminStatus.setDescription('The Musa slot card State.')
hwMusaSlotCardOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("del", 0), ("add", 1), ("reset", 2), ("use", 3), ("nouse", 4), ("inverse", 5), ("mmxswitchover", 6), ("delmmxsubboard", 7), ("addaiusubboard", 8), ("delaiusubboard", 9)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaSlotCardOperStatus.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotCardOperStatus.setDescription('The slot card operate status.')
hwMusaSlotDescript = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotDescript.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotDescript.setDescription('The sl1ot card description.')
hwMusaBoardCellLossPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("low", 0), ("high", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaBoardCellLossPriority.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaBoardCellLossPriority.setDescription('The board service priority.')
hwMusaBoardMaxBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("b-155M", 0), ("b-80M", 1), ("b-20M", 2), ("b-4M", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaBoardMaxBandwidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaBoardMaxBandwidth.setDescription('The board Max outburst cell rate.')
hwMusaCpuOccupyRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaCpuOccupyRate.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCpuOccupyRate.setDescription('The board cpu occupy rate.')
hwMusaQueryMemory = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 12), DisplayString()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaQueryMemory.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryMemory.setDescription('Query the board memory.')
# Software-load parameter columns (all write-only): protocol, content type,
# TFTP server, file name and the operation to run.
hwMusaLoadProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0))).clone(namedValues=NamedValues(("tftp", 0)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaLoadProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaLoadProtocol.setDescription('The protocal of load file:tftp.')
hwMusaLoadContent = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(6, 8, 7))).clone(namedValues=NamedValues(("program", 6), ("data", 8), ("fpga", 7)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaLoadContent.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaLoadContent.setDescription('The type of load file:program or data.')
hwMusaLoadTftpServerIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 15), IpAddress()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaLoadTftpServerIp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaLoadTftpServerIp.setDescription('The NMS IP address.')
hwMusaLoadFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 16), DisplayString()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaLoadFileName.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaLoadFileName.setDescription('The load file name.')
hwMusaLoadOperType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("load", 0), ("upback", 1), ("downback", 2), ("rollback", 3), ("clearflash", 4)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaLoadOperType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaLoadOperType.setDescription('The type of back data.')
# Per-slot bandwidth budget: configured up/down limits plus the currently
# used and practically available figures (all in kbps).
hwMusaSlotUpBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 18), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSlotUpBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotUpBandWidth.setDescription('The bandwidth(kpbs) of Up slot.')
hwMusaSlotDownBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 19), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSlotDownBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotDownBandWidth.setDescription('The main bandwidth(kpbs) of Down slot.')
hwMusaSlotUsedUpBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotUsedUpBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotUsedUpBandWidth.setDescription('The current used up bandwidth(kpbs) of slot.')
hwMusaSlotUsedDownBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotUsedDownBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotUsedDownBandWidth.setDescription('The current used down bandwidth(kpbs) of slot.')
hwMusaSlotUpPracticalBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotUpPracticalBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotUpPracticalBandWidth.setDescription('The practical up bandwidth(kpbs) of slot.')
hwMusaSlotDownPracticalBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 6, 3, 2, 1, 23), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaSlotDownPracticalBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSlotDownPracticalBandWidth.setDescription('The practical down bandwidth(kpbs) of slot.')
# --- OAM group (...2011.2.6.5.1.7) and OIM physical table (.7.1) ---
# hwMusaOimPhyTable is indexed by frame/slot/OIM-port and exposes
# loopback switches (.2-.4), alarm-insertion switches (.5-.6, .10-.12)
# and write-only BIP error-code injection counters (.7-.9) for
# optical-interface diagnostics.
hwMusaOamGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7))
hwMusaOimPhyTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1), )
if mibBuilder.loadTexts: hwMusaOimPhyTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaOimPhyTable.setDescription('Musa OIM physical table.')
hwMusaOimPhyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaFrameIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSlotIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwOIMPortIndex"))
if mibBuilder.loadTexts: hwMusaOimPhyEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaOimPhyEntry.setDescription('This list contains Musa OIM physical variables.')
hwOIMPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: hwOIMPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwOIMPortIndex.setDescription('the Musa OIM number.(0--1) ')
hwMusaSetSrcLoop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notloop", 0), ("loop", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSetSrcLoop.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSetSrcLoop.setDescription('set source loop.')
hwMusaSetLineLoop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notloop", 0), ("loop", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSetLineLoop.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSetLineLoop.setDescription('set line loop.')
hwMusaSetUtopiaLoop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notloop", 0), ("loop", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSetUtopiaLoop.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSetUtopiaLoop.setDescription('set Utopia loop.')
hwMusaInsertLOF = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notinsert", 0), ("insert", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaInsertLOF.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaInsertLOF.setDescription('insert LOF warning at the optic port.')
hwMusaInsertLOS = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notinsert", 0), ("insert", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaInsertLOS.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaInsertLOS.setDescription('insert LOS warning at the optic port.')
# BIP1/2/3 injection columns are plain write-only Integer32 (no
# named-values enumeration), unlike the insert/loop switches above.
hwMusaInsertBIP1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 7), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaInsertBIP1.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaInsertBIP1.setDescription('Insert BIP1(SBIP) errorcode at the optic port.')
hwMusaInsertBIP2 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 8), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaInsertBIP2.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaInsertBIP2.setDescription('Insert BIP2(LBIP) errorcode at the optic port.')
hwMusaInsertBIP3 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 9), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaInsertBIP3.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaInsertBIP3.setDescription('Insert BIP3(PBIP) errorcode at the optic port.')
hwMusaInsertLAIS = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notinsert", 0), ("insert", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaInsertLAIS.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaInsertLAIS.setDescription('Insert LAIS warning at the optic port.')
hwMusaInsertPAIS = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notinsert", 0), ("insert", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaInsertPAIS.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaInsertPAIS.setDescription('Insert PAIS warning at the optic port.')
hwMusaInsertLRDI = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notinsert", 0), ("insert", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaInsertLRDI.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaInsertLRDI.setDescription('Insert LRDI warning at the optic port.')
# --- OIM optical performance table (...2011.2.6.5.1.7.2) ---
# Same frame/slot/OIM-port index as hwMusaOimPhyTable.  Columns .1-.12
# are read-only Counter32 "since last query" statistics; .13/.14 are
# write-only clear actions.
hwMusaOimOpticTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2), )
if mibBuilder.loadTexts: hwMusaOimOpticTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaOimOpticTable.setDescription('Musa OIM optical performance table.')
hwMusaOimOpticEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaFrameIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSlotIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwOIMPortIndex"))
if mibBuilder.loadTexts: hwMusaOimOpticEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaOimOpticEntry.setDescription('This list contains Musa OIM optical performance variables.')
hwMusaQueryCurBIP1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurBIP1.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurBIP1.setDescription('Query current BIP1 errorcode since last query.')
hwMusaQueryCurBIP2 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurBIP2.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurBIP2.setDescription('Query current BIP2 errorcode since last query.')
hwMusaQueryCurBIP3 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurBIP3.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurBIP3.setDescription('Query current BIP3 errorcode since last query.')
hwMusaQueryCurLFEBE = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurLFEBE.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurLFEBE.setDescription('Query current LFEBE errorcode since last query.')
hwMusaQueryCurPFEBE = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurPFEBE.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurPFEBE.setDescription('Query current PFEBE errorcode since last query.')
hwMusaQueryCurSendCellNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurSendCellNum.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurSendCellNum.setDescription('Query current send cell numbers since last query.')
hwMusaQueryCurReceiveCellNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurReceiveCellNum.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurReceiveCellNum.setDescription('Query current receive cell numbers since last query.')
hwMusaQueryCurCorrectHECNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurCorrectHECNum.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurCorrectHECNum.setDescription('Query current can correct HEC numbers since last query.')
hwMusaQueryCurNonCorrectHECNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurNonCorrectHECNum.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurNonCorrectHECNum.setDescription('Query current cannot correct HEC numbers since last query.')
hwMusaQueryCurLOCDNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurLOCDNum.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurLOCDNum.setDescription('Query current LOCD numbers since last query.')
hwMusaQueryCurUnmatchCellNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurUnmatchCellNum.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurUnmatchCellNum.setDescription('Query current Unmatch cell numbers since last query.')
hwMusaQueryCurOOFNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaQueryCurOOFNum.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaQueryCurOOFNum.setDescription('Query current OOF numbers since last query.')
# NOTE(review): the two descriptions below look swapped relative to the
# object names (ClearAllAlarmStat says "cell statistics",
# ClearOIMErrEventStat says "alarm statistics"); they mirror the vendor
# MIB text and are left as-is — confirm against the original MIB.
hwMusaClearAllAlarmStat = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 13), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaClearAllAlarmStat.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaClearAllAlarmStat.setDescription('Clear port cell statistics.')
hwMusaClearOIMErrEventStat = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 7, 2, 1, 14), Integer32()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaClearOIMErrEventStat.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaClearOIMErrEventStat.setDescription('Clear all OIM alarm statistics.')
# --- Warning control table (...2011.2.6.5.1.9) ---
# Indexed by warning ID; per-warning severity, NMS/terminal report
# filters, counting switch, 15-minute/24-hour thresholds, and two
# read-only description strings (Chinese and English).
hwMusaWarningCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9), )
if mibBuilder.loadTexts: hwMusaWarningCtrlTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarningCtrlTable.setDescription('Musa warning control table.')
hwMusaWarningCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaWarningID"))
if mibBuilder.loadTexts: hwMusaWarningCtrlEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarningCtrlEntry.setDescription('This list contains Musa warning Control parameters.')
hwMusaWarningID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaWarningID.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarningID.setDescription('The MUSA Warning ID.')
hwMusaWarningLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("notify", 0), ("normal", 1), ("serious", 2), ("fatal", 3), ("default", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaWarningLevel.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarningLevel.setDescription('The MUSA Warning level.')
hwMusaWarningNmsCtrl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("off", 0), ("on", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaWarningNmsCtrl.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarningNmsCtrl.setDescription('The MUSA Warning NMS filter.')
# NOTE(review): TerminalCtrl reuses the "NMS filter" description string
# from the column above; likely a copy-paste in the vendor MIB — left
# unchanged because the string is runtime-visible.
hwMusaWarningTerminalCtrl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("off", 0), ("on", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaWarningTerminalCtrl.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarningTerminalCtrl.setDescription('The MUSA Warning NMS filter.')
hwMusaWarningIsCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("off", 0), ("on", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaWarningIsCount.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarningIsCount.setDescription('The Warning is statics or not.')
hwMusaWarn15MinThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaWarn15MinThreshold.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarn15MinThreshold.setDescription('The Warning 15 Minute threshold.')
hwMusaWarn24HourThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaWarn24HourThreshold.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarn24HourThreshold.setDescription('The Warning 24 Hour threshold.')
hwMusaWarningDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaWarningDesc.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarningDesc.setDescription('The MUSA Warning Chinese description.')
hwMusaWarningEngDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 9, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaWarningEngDesc.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaWarningEngDesc.setDescription('The MUSA Warning English description.')
# --- System route table (...2011.2.6.5.1.10) ---
# Static-route management: destination IP/mask, gateway, and an
# add/del operation column.
hwMusaSysRouteTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 10), )
if mibBuilder.loadTexts: hwMusaSysRouteTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSysRouteTable.setDescription('Musa sys route table.')
hwMusaSysRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 10, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSysRouteIndex"))
if mibBuilder.loadTexts: hwMusaSysRouteEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSysRouteEntry.setDescription('This list contains Musa Sys Route Configuration parameters and variables.')
hwMusaSysRouteIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 10, 1, 1), Integer32())
if mibBuilder.loadTexts: hwMusaSysRouteIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSysRouteIndex.setDescription('Sys Route index.')
hwMusaDstIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 10, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDstIp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDstIp.setDescription('Destination IP address.')
hwMusaDstIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 10, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDstIpMask.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDstIpMask.setDescription('Destination IP address Mask.')
# NOTE(review): "ipaddree" typo below is in the vendor MIB DESCRIPTION;
# left unchanged (runtime-visible string).
hwMusaGateIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 10, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaGateIp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaGateIp.setDescription('Gateway ipaddree.')
hwMusaSysRouteOper = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 10, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("add", 0), ("del", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSysRouteOper.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSysRouteOper.setDescription('Sys route operate state,include:add/del.')
# --- Slot load-rate table (...2011.2.6.5.1.11) ---
# Read-only progress (0-100 percent) and operation type for an ongoing
# board load/backup, indexed by frame/slot.
hwMusaLoadRateTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 11), )
if mibBuilder.loadTexts: hwMusaLoadRateTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaLoadRateTable.setDescription('This table contains Musa slot load rate.')
hwMusaLoadRateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 11, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaFrameIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSlotIndex"))
if mibBuilder.loadTexts: hwMusaLoadRateEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaLoadRateEntry.setDescription('This list contains Musa slot Load rate variables.')
hwMusaLoadRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaLoadRate.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaLoadRate.setDescription('Load progress.')
hwMusaLoadType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 11, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("noOper", 0), ("backData", 1), ("dumpData", 2), ("loadData", 3), ("loadProc", 4), ("loadFpga", 5), ("program", 6), ("fpga", 7), ("data", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaLoadType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaLoadType.setDescription('Load type.')
# --- PVC traffic descriptor table (...2011.2.6.5.1.13) ---
# ATM traffic records (index 1-5000): traffic type, service class,
# CLP0/CLP0+1 PCR/SCR, MBS, MCR, CDVT, reference count, add/del
# operation, and the next free record index.
hwMusaTrafficTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13), )
if mibBuilder.loadTexts: hwMusaTrafficTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTrafficTable.setDescription('Musa Pvc Traffic table.')
hwMusaTrafficEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaTrafficIndex"))
if mibBuilder.loadTexts: hwMusaTrafficEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTrafficEntry.setDescription('This list contains Musa Pvc Traffic Configuration parameters and variables.')
hwMusaTrafficIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5000)))
if mibBuilder.loadTexts: hwMusaTrafficIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTrafficIndex.setDescription('Musa Traffic Table Index.')
# NOTE(review): the DESCRIPTION below enumerates values 0-13 while the
# actual named values are 1-14; the off-by-one is in the vendor MIB
# text and is preserved verbatim.
hwMusaTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))).clone(namedValues=NamedValues(("noclpnoscr", 1), ("clpnotaggingnoscr", 2), ("clptaggingnoscr", 3), ("noclpscr", 4), ("clpnotaggingscr", 5), ("clptaggingscr", 6), ("clpnotaggingmcr", 7), ("clptransparentnoscr", 8), ("clptransparentscr", 9), ("noclptaggingnoscr", 10), ("noclpnoscrcdvt", 11), ("noclpscrcdvt", 12), ("clpnotaggingscrcdvt", 13), ("clptaggingscrcdvt", 14)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaTrafficType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTrafficType.setDescription('Musa Traffic table type.(0:NOCLPNOSCR,1:CLPNOTAGGINGNOSCR, 2:CLPTAGGINGNOSCR,3:NOCLPSCR,4:CLPNOTAGGINGSCR,5:CLPTAGGINGSCR, 6:CLPNOTAGGINGMCR,7:CLPTRANSPARENTNOSCR,8:CLPTRANSPARENTSCR, 9:NOCLPTAGGINGNOSCR,10:NOCLPNOSCRCDVT,11;NOCLPSCRCDVT,12;CLPNOTAGGINGSCRCDVT, 13:CLPTAGGINGSCRCDVT)')
hwMusaServiceClass = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 6))).clone(namedValues=NamedValues(("cbr", 2), ("rtVBR", 3), ("nrtVBR", 4), ("ubr", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaServiceClass.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaServiceClass.setDescription('Musa Traffic service class.(1:other,2:CBR,3:rtVBR,4:nrtVBR,5:ABR:6:UBR)')
hwMusaRefCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaRefCount.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaRefCount.setDescription('Musa Traffic Traffic Record reference count.')
hwMusaRecordState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1), ("module", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaRecordState.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaRecordState.setDescription('Musa Traffic table record state. disable(0),enable(1),module(2).')
hwMusaClp01pcr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaClp01pcr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaClp01pcr.setDescription('Musa Traffic table parameter Clp01pcr.')
hwMusaClp0pcr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaClp0pcr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaClp0pcr.setDescription('Musa Traffic table parameter Clp0pcr.')
hwMusaClp01scr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaClp01scr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaClp01scr.setDescription('Musa Traffic table parameter Clp01scr.')
hwMusaClp0scr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaClp0scr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaClp0scr.setDescription('Musa Traffic table parameter Clp0scr.')
hwMusaMbs = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaMbs.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaMbs.setDescription('Musa Traffic table parameter Mbs.')
hwMusaMcr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaMcr.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaMcr.setDescription('Musa Traffic table parameter Mcr.')
hwMusaCDVT = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaCDVT.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCDVT.setDescription('Musa Traffic table parameter CDVT')
hwMusaOperat = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("add", 0), ("del", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaOperat.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaOperat.setDescription('Musa Traffic table operat.(0:add,1:del)')
hwMusaNextTrafficIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 13, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaNextTrafficIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaNextTrafficIndex.setDescription('Musa Traffic table next traffic index')
# --- Campus PVC configuration table (...2011.2.6.5.1.15) ---
# Binds an ADSL frame/slot/port + VPI/VCI to a VLAN, with traffic
# indexes for each direction (to/from the LAN board) and an add/del
# operation column.  Indexed by frame, slot, VLAN id and ICI index.
hwMusaCampusPvcConfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15), )
if mibBuilder.loadTexts: hwMusaCampusPvcConfTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCampusPvcConfTable.setDescription('Musa campus pvc configuration table.')
hwMusaCampusPvcConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaFrameIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSlotIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaVlanId"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaVlanIciIndex"))
if mibBuilder.loadTexts: hwMusaCampusPvcConfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCampusPvcConfEntry.setDescription('This list contains Musa campus pvc Configuration parameters and variables.')
hwMusaVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaVlanId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaVlanId.setDescription('Musa campus VLan Id.')
hwMusaVlanIciIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaVlanIciIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaVlanIciIndex.setDescription('Musa campus VLanIciIndex.')
hwMusaAdlPortCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaAdlPortCount.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAdlPortCount.setDescription('Musa campus adsl port counter.')
hwMusaAdlFrameId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaAdlFrameId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAdlFrameId.setDescription('Musa campus ADL frame ID.')
hwMusaAdlSlotId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaAdlSlotId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAdlSlotId.setDescription('Musa campus ADL slot ID.')
hwMusaAdlPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaAdlPortId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAdlPortId.setDescription('Musa campus ADL port ID.')
hwMusaAdlVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaAdlVpi.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAdlVpi.setDescription('Musa campus VPI.')
hwMusaAdlVci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(32, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaAdlVci.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAdlVci.setDescription('Musa campus VCI.')
hwMusaToLanTrafficId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaToLanTrafficId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaToLanTrafficId.setDescription('Musa campus to lan board traffic index.')
hwMusaFromLanTrafficId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaFromLanTrafficId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaFromLanTrafficId.setDescription('Musa campus from lan board traffic index.')
hwMusaAdlPortOperat = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 15, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("add", 0), ("del", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaAdlPortOperat.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAdlPortOperat.setDescription('Musa campus pvc Operat')
# --- Optic-port bandwidth table (...2011.2.6.5.1.17) ---
# Per optical port (frame/slot/OIM-port index): configured main
# bandwidth, current usage (read-only), and reserved/practical
# bandwidth settings, all in kbps per the descriptions.
hwMusaOpticBandwidthTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17), )
if mibBuilder.loadTexts: hwMusaOpticBandwidthTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaOpticBandwidthTable.setDescription('Musa Optic port bandwidth configuration table.')
hwMusaOpticBandwidthEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaFrameIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSlotIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwOIMPortIndex"))
if mibBuilder.loadTexts: hwMusaOpticBandwidthEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaOpticBandwidthEntry.setDescription('This list contains Musa optic port bandwidth Configuration.')
hwMusaUpOpticMainBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaUpOpticMainBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaUpOpticMainBandWidth.setDescription('The main bandwidth(kpbs) of Up optic port.')
hwMusaDnOpticMainBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDnOpticMainBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDnOpticMainBandWidth.setDescription('The main bandwidth(kpbs) of Down optic port.')
hwMusaCurUsedUpBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaCurUsedUpBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCurUsedUpBandWidth.setDescription('The current used up bandwidth(kpbs) of the optic port.')
hwMusaCurUsedDownBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaCurUsedDownBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCurUsedDownBandWidth.setDescription('The current used down bandwidth(kpbs) of the optic port.')
hwMusaUpReservedBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaUpReservedBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaUpReservedBandWidth.setDescription('The up reserved bandwidth of the optic port.')
hwMusaDownReservedBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDownReservedBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDownReservedBandWidth.setDescription('The down reserved bandwidth of the optic port.')
hwMusaUpPracticalBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaUpPracticalBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaUpPracticalBandWidth.setDescription('The up practical bandwidth of the optic port.')
hwMusaDownPracticalBandWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 17, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDownPracticalBandWidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDownPracticalBandWidth.setDescription('The down practical bandwidth of the optic port.')
# --- hwMusaTrafficCbrPcr table (...2011.2.6.5.1.18): CBR traffic PCR parameters ---
# Indexed by hwMusaCbrPcrIndex; value is read-write, reference count read-only.
hwMusaTrafficCbrPcrTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 18), )
if mibBuilder.loadTexts: hwMusaTrafficCbrPcrTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTrafficCbrPcrTable.setDescription('CBR traffic PCR parameter table.')
hwMusaTrafficCbrPcrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 18, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaCbrPcrIndex"))
if mibBuilder.loadTexts: hwMusaTrafficCbrPcrEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTrafficCbrPcrEntry.setDescription("This list contains CBR traffic's PCR parameter.")
# Index column: no setMaxAccess call emitted for table indices.
hwMusaCbrPcrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 18, 1, 1), Integer32())
if mibBuilder.loadTexts: hwMusaCbrPcrIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCbrPcrIndex.setDescription('The index of CBR traffic PCR parameter table.')
hwMusaCbrPcrValue = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 18, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaCbrPcrValue.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCbrPcrValue.setDescription('The CBR traffic PCR parameter value.')
hwMusaCbrPcrRefCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 18, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaCbrPcrRefCount.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCbrPcrRefCount.setDescription('The CBR traffic PCR parameter reference count.')
# --- hwMusaTrafficRtvbrScr table (...2011.2.6.5.1.19): rt-VBR traffic SCR parameters ---
# Mirrors the CBR/PCR table structure above: index, read-write value, read-only refcount.
hwMusaTrafficRtvbrScrTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 19), )
if mibBuilder.loadTexts: hwMusaTrafficRtvbrScrTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTrafficRtvbrScrTable.setDescription('rtVbr traffic SCR parameter table.')
hwMusaTrafficRtvbrScrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 19, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaRtvbrScrIndex"))
if mibBuilder.loadTexts: hwMusaTrafficRtvbrScrEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTrafficRtvbrScrEntry.setDescription("This list contains rtVbr traffic's SCR parameter.")
hwMusaRtvbrScrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 19, 1, 1), Integer32())
if mibBuilder.loadTexts: hwMusaRtvbrScrIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaRtvbrScrIndex.setDescription('The index of rtVbr traffic SCR parameter table.')
hwMusaRtvbrScrValue = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 19, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaRtvbrScrValue.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaRtvbrScrValue.setDescription('The rtVbr traffic SCR parameter value.')
hwMusaRtvbrScrRefCount = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 19, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaRtvbrScrRefCount.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaRtvbrScrRefCount.setDescription('The rtVbr traffic SCR parameter reference count.')
# --- hwMusaPvcTrafficStatis table (...2011.2.6.5.1.21): per-PVC cell counters ---
# Indexed by hwMusaSPvcIndex (declared elsewhere in the MIB); all columns are
# read-only Counter32 of Rx/Tx cells in the up and down stream directions.
hwMusaPvcTrafficStatisTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 21), )
if mibBuilder.loadTexts: hwMusaPvcTrafficStatisTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPvcTrafficStatisTable.setDescription('PVC traffic statistics table.')
hwMusaPvcTrafficStatisEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 21, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSPvcIndex"))
if mibBuilder.loadTexts: hwMusaPvcTrafficStatisEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPvcTrafficStatisEntry.setDescription('PVC traffic statistics table entry.')
hwMusaUpStreamTrafficRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 21, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaUpStreamTrafficRx.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaUpStreamTrafficRx.setDescription('Up stream traffic Rx(cells).')
hwMusaUpStreamTrafficTx = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 21, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaUpStreamTrafficTx.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaUpStreamTrafficTx.setDescription('Up stream traffic Tx(cells).')
hwMusaDownStreamTrafficRx = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 21, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaDownStreamTrafficRx.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDownStreamTrafficRx.setDescription('Down stream traffic Rx(cells).')
hwMusaDownStreamTrafficTx = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 21, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaDownStreamTrafficTx.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDownStreamTrafficTx.setDescription('Down stream traffic Tx(cells).')
# --- hwMusaAllPvcConf table (...2011.2.6.5.1.22): configuration of all PVC/PVP kinds ---
# Indexed by (hwMusaTypeOfPvcPvp, hwMusaCidIndex). Column sub-ids are deliberately
# non-contiguous (1-17, 20-35, 38-42) as emitted from the source MIB.
hwMusaAllPvcConfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22), )
if mibBuilder.loadTexts: hwMusaAllPvcConfTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAllPvcConfTable.setDescription('The config talbe of all kinds of PVC.')
hwMusaAllPvcConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaTypeOfPvcPvp"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaCidIndex"))
if mibBuilder.loadTexts: hwMusaAllPvcConfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAllPvcConfEntry.setDescription('This lists config variables of all kinds of PVC.')
# Columns 1-17: source-board endpoint of the PVC/PVP.
hwMusaCidIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaCidIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaCidIndex.setDescription('The CID index.')
hwMusaSrcFrameId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcFrameId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcFrameId.setDescription('The Frame id of source board.')
# NOTE(review): 'hwMuasSrcSlotId' looks like a transposition of 'hwMusaSrcSlotId',
# but the name comes from the source MIB and is exported below — do not rename.
hwMuasSrcSlotId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMuasSrcSlotId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMuasSrcSlotId.setDescription('The slot id of source board.')
hwMusaSrcPortVlanVccId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcPortVlanVccId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcPortVlanVccId.setDescription('The Port/VlanID/VCC Index of source board.')
hwMusaSrcOnuId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcOnuId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcOnuId.setDescription('The ONUID of source board.')
hwMusaSrcBoardVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcBoardVpi.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcBoardVpi.setDescription('Src board Port vpi Index.')
hwMusaSrcBoardVci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcBoardVci.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcBoardVci.setDescription('Src board VCI Index.')
hwMusaSrcPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("uni", 0), ("sdt", 1), ("udt", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcPortType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcPortType.setDescription('The type of CESC port.')
hwMusaSrcCescChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcCescChannelId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcCescChannelId.setDescription('Cesc channel Id')
hwMusaSrcCescChannelBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcCescChannelBitmap.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcCescChannelBitmap.setDescription('Cesc channel Bitmap')
hwMusaSrcCescFillDegree = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 47))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcCescFillDegree.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcCescFillDegree.setDescription('Cesc fill degree(20-47)')
hwMusaSrcFrcDlciType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcFrcDlciType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcFrcDlciType.setDescription('The ATM Logical Port DLCI.')
hwMusaSrcFrcIwfType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("network11", 0), ("service", 1), ("hdlc", 2), ("networkN1", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcFrcIwfType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcFrcIwfType.setDescription('The ATM Logical Port have service.')
hwMusaSrcFrcActiveStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("block", 0), ("unblock", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcFrcActiveStatus.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcFrcActiveStatus.setDescription('The FRC ATM logical port BLOCK/UNBLOCK.')
hwMusaSrcFrcFreeBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 15), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcFrcFreeBandwidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcFrcFreeBandwidth.setDescription('The ATM logical port free bandwidth.')
hwMusaSrcApcConnectAttribute = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 16), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcApcConnectAttribute.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcApcConnectAttribute.setDescription('The APC connection attribute of MMX(source) board.')
hwMusaSrcCescV35N = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 17), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcCescV35N.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcCescV35N.setDescription('The N value of V35 subboard of CESC.')
# Columns 20-35: destination-board endpoint (parallel structure to the source columns).
hwMusaDestFrameId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 20), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestFrameId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestFrameId.setDescription('Destination frame Index.')
hwMusaDestSlotId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 21), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestSlotId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestSlotId.setDescription('Destination slot Index.')
hwMusaDestPortVlanVccId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 22), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestPortVlanVccId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestPortVlanVccId.setDescription('Destination port Index.')
hwMusaDestOnuId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 23), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestOnuId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestOnuId.setDescription('Destination ONU Index.')
hwMusaDestBoardVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 24), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestBoardVpi.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestBoardVpi.setDescription('Destination Port vpi Index.')
hwMusaDestBoardVci = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 25), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestBoardVci.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestBoardVci.setDescription('Destination VCI Index.')
hwMusaDestPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("uni", 0), ("sdt", 1), ("udt", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestPortType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestPortType.setDescription('The type of CESC port.')
hwMusaDestCescChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 27), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestCescChannelId.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestCescChannelId.setDescription('Cesc channelId')
hwMusaDestCescChannelBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 28), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestCescChannelBitmap.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestCescChannelBitmap.setDescription('Cesc channelbitmap')
hwMusaDestCescFillDegree = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 29), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 47))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestCescFillDegree.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestCescFillDegree.setDescription('Cesc fill degree(20-47)')
hwMusaDestFrcDlciType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 30), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestFrcDlciType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestFrcDlciType.setDescription('The ATM Logical Port DLCI.')
hwMusaDestFrcIwfType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("network11", 0), ("service", 1), ("hdlc", 2), ("networkN1", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestFrcIwfType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestFrcIwfType.setDescription('The ATM Logical Port have service.')
hwMusaDestFrcActiveStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("block", 0), ("unblock", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestFrcActiveStatus.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestFrcActiveStatus.setDescription('The FRC ATM logical port BLOCK/UNBLOCK.')
hwMusaDestFrcFreeBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 33), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestFrcFreeBandwidth.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestFrcFreeBandwidth.setDescription('The ATM logical port free bandwidth.')
hwMusaDestApcConnectAttribute = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 34), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestApcConnectAttribute.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestApcConnectAttribute.setDescription('connect attribut of AIU to source board.')
hwMusaDestCescV35N = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 35), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestCescV35N.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestCescV35N.setDescription('The N value of V35 subboard of CESC.')
# Columns 38-42: traffic descriptors, add/delete operation, PVC/PVP type (index), state.
hwMusaSrcToDestTraffic = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 38), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaSrcToDestTraffic.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaSrcToDestTraffic.setDescription('The source board to destination traffic.')
hwMusaDestToSrcTraffic = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 39), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaDestToSrcTraffic.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaDestToSrcTraffic.setDescription('The destination to source board traffic.')
hwMusaAllPvcOperater = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 40), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("add", 0), ("del", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaAllPvcOperater.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaAllPvcOperater.setDescription('Add or delete PVC.')
hwMusaTypeOfPvcPvp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 41), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("pvc", 0), ("pvp", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaTypeOfPvcPvp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaTypeOfPvcPvp.setDescription('Identified type of Pvc or Pvp')
hwMusaPvcPvpState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 22, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("invalid", 2), ("delete", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaPvcPvpState.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPvcPvpState.setDescription('Pvc or Pvp state ')
# --- hwMusaPvcCid table (...2011.2.6.5.1.23): CID lookup for a PVC ---
# Indexed by the full source endpoint (frame, slot, port/VLAN/VCC, ONU, VPI, VCI);
# the single column maps that endpoint to its CID.
hwMusaPvcCidTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 23), )
if mibBuilder.loadTexts: hwMusaPvcCidTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPvcCidTable.setDescription('The CID talbe of all kinds of PVC.')
hwMusaPvcCidEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 23, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaFrameIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSlotIndex"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSrcPortVlanVccId"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSrcOnuId"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSrcBoardVpi"), (0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaSrcBoardVci"))
if mibBuilder.loadTexts: hwMusaPvcCidEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPvcCidEntry.setDescription('This lists CID corresponding to the PVC.')
hwMusaPvcCid = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 23, 1, 1), Counter32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwMusaPvcCid.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPvcCid.setDescription('The CID corresponding to the PVC.')
# --- hwMusaPatchOperate table (...2011.2.6.5.1.24): patch load/activate commands ---
# All action columns are write-only: the manager writes protocol, filename, server IP
# and finally the operation code to drive a patch action on the MMX board.
hwMusaPatchOperateTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 24), )
if mibBuilder.loadTexts: hwMusaPatchOperateTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchOperateTable.setDescription('Musa Mmx patch table.')
hwMusaPatchOperateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 24, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaPatchIdIndex"))
if mibBuilder.loadTexts: hwMusaPatchOperateEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchOperateEntry.setDescription('This list contains Musa patch description variables.')
hwMusaPatchIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 24, 1, 1), Integer32())
if mibBuilder.loadTexts: hwMusaPatchIdIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchIdIndex.setDescription('The PatchId index.')
# tftp is the only supported transfer protocol per the enumeration.
hwMusaPatchLoadProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 24, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("tftp", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaPatchLoadProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchLoadProtocol.setDescription('The patch load protocol.')
hwMusaPatchLoadFilename = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 24, 1, 3), DisplayString()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaPatchLoadFilename.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchLoadFilename.setDescription('The patch load filename.')
hwMusaPatchLoadSerIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 24, 1, 4), IpAddress()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaPatchLoadSerIp.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchLoadSerIp.setDescription('The patch load server IP.')
hwMusaPatchOper = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 24, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("activate", 1), ("deactivate", 2), ("load", 3), ("remove", 4), ("run", 5)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: hwMusaPatchOper.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchOper.setDescription('The patch load operate type.')
# --- hwMusaPatch table (...2011.2.6.5.1.25): read-only view of installed patches ---
# Companion to the write-only operate table above; re-uses hwMusaPatchIdIndex as index.
hwMusaPatchTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25), )
if mibBuilder.loadTexts: hwMusaPatchTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchTable.setDescription('Musa Mmx patch table.')
hwMusaPatchEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1), ).setIndexNames((0, "HUAWEI-MUSA-MA5100-MIB", "hwMusaPatchIdIndex"))
if mibBuilder.loadTexts: hwMusaPatchEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchEntry.setDescription('This list contains Musa patch description variables.')
hwMusaPatchShowIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1, 1), Integer32())
if mibBuilder.loadTexts: hwMusaPatchShowIdIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchShowIdIndex.setDescription('The PatchId index.')
hwMusaPatchCRC = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaPatchCRC.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchCRC.setDescription('The Patch CRC.')
hwMusaPatchType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("c-Commonpatch", 1), ("t-Temporarypatch", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaPatchType.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchType.setDescription('The Patch type.')
hwMusaPatchState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("run", 1), ("activate", 2), ("deactivate", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaPatchState.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchState.setDescription('The Patch state.')
# Columns 5-9: patch image layout (code/data addresses and lengths, patched-function count).
hwMusaPatchCodeAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaPatchCodeAddress.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchCodeAddress.setDescription('The Patch code address.')
hwMusaPatchCodeLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaPatchCodeLength.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchCodeLength.setDescription('The Patch code length.')
hwMusaPatchDataAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaPatchDataAddress.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchDataAddress.setDescription('The Patch data address.')
hwMusaPatchDataLength = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaPatchDataLength.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchDataLength.setDescription('The Patch data length.')
hwMusaPatchFunctionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 1, 25, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMusaPatchFunctionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: hwMusaPatchFunctionNumber.setDescription('The Patch function number.')
# End-of-MIB sentinel scalar (...2011.2.6.5.100.1), read-only.
hwMa5100EndOfMib = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 6, 5, 100, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMa5100EndOfMib.setStatus('mandatory')
if mibBuilder.loadTexts: hwMa5100EndOfMib.setDescription('End of HW MA5100 mib')
# Register every MIB object defined in this module with the builder so other
# modules (and pysnmp) can import them by name. Keyword order is arbitrary,
# as emitted by the pysmi code generator.
mibBuilder.exportSymbols("HUAWEI-MUSA-MA5100-MIB", hwMusaSetUtopiaLoop=hwMusaSetUtopiaLoop, hwMusaIpAddr=hwMusaIpAddr, hwMusaLoadType=hwMusaLoadType, hwMusaAllPvcOperater=hwMusaAllPvcOperater, hwMusaMcr=hwMusaMcr, hwMusaFrameNumber=hwMusaFrameNumber, hwMusaSysTime=hwMusaSysTime, hwMusaWarningDesc=hwMusaWarningDesc, hwMusaDownStreamTrafficTx=hwMusaDownStreamTrafficTx, hwMusaAdlPortId=hwMusaAdlPortId, hwMusaQueryCurOOFNum=hwMusaQueryCurOOFNum, hwMusaFrameConfTable=hwMusaFrameConfTable, hwMusaOpticBandwidthTable=hwMusaOpticBandwidthTable, hwMusaRecordState=hwMusaRecordState, hwMusaNmsPvcIndex=hwMusaNmsPvcIndex, hwMusaNmsRxTraffic=hwMusaNmsRxTraffic, hwMusaVlanIciIndex=hwMusaVlanIciIndex, hwMusaDestToSrcTraffic=hwMusaDestToSrcTraffic, hwMusaOimOpticEntry=hwMusaOimOpticEntry, hwMusaIpAddrPermitTable=hwMusaIpAddrPermitTable, hwMusaDestFrameId=hwMusaDestFrameId, hwMusaIpAddrRejectEntry=hwMusaIpAddrRejectEntry, hwMusaUpPracticalBandWidth=hwMusaUpPracticalBandWidth, hwMusaIpPermitTableId=hwMusaIpPermitTableId, hwOIMPortIndex=hwOIMPortIndex, hwMusaSrcFrameId=hwMusaSrcFrameId, hwMusaShelfNumber=hwMusaShelfNumber, hwMusaNmsIp=hwMusaNmsIp, hwMusaSlotNumbers=hwMusaSlotNumbers, hwMusaWarningNmsCtrl=hwMusaWarningNmsCtrl, hwMusaDestFrcDlciType=hwMusaDestFrcDlciType, hwMusaQueryCurCorrectHECNum=hwMusaQueryCurCorrectHECNum, hwMusaQueryCurLFEBE=hwMusaQueryCurLFEBE, hwMusaDestSlotId=hwMusaDestSlotId, hwMusaSrcFrcActiveStatus=hwMusaSrcFrcActiveStatus, hwMusaWarningLevel=hwMusaWarningLevel, hwMusaEthernetFirewall=hwMusaEthernetFirewall, hwMusaInsertLOF=hwMusaInsertLOF, hwMusaVlanId=hwMusaVlanId, hwMusaSysRouteEntry=hwMusaSysRouteEntry, hwMusaRtvbrScrRefCount=hwMusaRtvbrScrRefCount, hwMusaSlotGroup=hwMusaSlotGroup, hwMusaSrcPortVlanVccId=hwMusaSrcPortVlanVccId, hwMusaPatchLoadProtocol=hwMusaPatchLoadProtocol, hwMusaAtmIpMask=hwMusaAtmIpMask, hwMusaOpticConvergentRate=hwMusaOpticConvergentRate, hwMusaFrameIndex=hwMusaFrameIndex, hwMusaWarningID=hwMusaWarningID, 
hwMusaNmsIpAddr=hwMusaNmsIpAddr, hwMusaSlotDownPracticalBandWidth=hwMusaSlotDownPracticalBandWidth, hwMusaIpAddrPermitOper=hwMusaIpAddrPermitOper, hwMusaLoadTftpServerIp=hwMusaLoadTftpServerIp, hwMusaWarningCtrlTable=hwMusaWarningCtrlTable, hwMusaInsertLRDI=hwMusaInsertLRDI, hwMusaSrcPortType=hwMusaSrcPortType, hwMusaNmsTxTraffic=hwMusaNmsTxTraffic, hwMusaNmsPvcConfTable=hwMusaNmsPvcConfTable, hwMusaFrameType=hwMusaFrameType, hwMusaSlotCardAdminStatus=hwMusaSlotCardAdminStatus, hwMusaShelf=hwMusaShelf, hwMusaOamGroup=hwMusaOamGroup, hwMusaNmsID=hwMusaNmsID, hwMusaWarn15MinThreshold=hwMusaWarn15MinThreshold, hwMusaNmsParaConfTable=hwMusaNmsParaConfTable, hwMusaAdlVpi=hwMusaAdlVpi, hwMusaIpAddrRejectOper=hwMusaIpAddrRejectOper, hwMusaSlotNumber=hwMusaSlotNumber, hwMusaBoardMaxBandwidth=hwMusaBoardMaxBandwidth, hwMusaGateIp=hwMusaGateIp, hwMusaSrcFrcIwfType=hwMusaSrcFrcIwfType, hwMusaSlotUpPracticalBandWidth=hwMusaSlotUpPracticalBandWidth, hwMa5100Mib=hwMa5100Mib, hwMusaQueryCurBIP1=hwMusaQueryCurBIP1, hwMusaClearAllAlarmStat=hwMusaClearAllAlarmStat, hwMusaSysRouteIndex=hwMusaSysRouteIndex, hwMusaPvcCidTable=hwMusaPvcCidTable, hwMusaAllPvcConfTable=hwMusaAllPvcConfTable, hwMusaFromLanTrafficId=hwMusaFromLanTrafficId, hwMusaSrcCescChannelId=hwMusaSrcCescChannelId, hwMusaLoadProtocol=hwMusaLoadProtocol, hwMusaInsertBIP1=hwMusaInsertBIP1, hwMusaPatchLoadSerIp=hwMusaPatchLoadSerIp, hwMusaPatchShowIdIndex=hwMusaPatchShowIdIndex, hwMusaTrafficCbrPcrEntry=hwMusaTrafficCbrPcrEntry, hwMusaPatchOper=hwMusaPatchOper, hwMusaPatchFunctionNumber=hwMusaPatchFunctionNumber, hwMusaRejectBeginIp=hwMusaRejectBeginIp, hwMusaSlotCardVersion=hwMusaSlotCardVersion, hwMusaPvcTrafficStatisEntry=hwMusaPvcTrafficStatisEntry, hwMusaBoardCellLossPriority=hwMusaBoardCellLossPriority, hwMusaClp01pcr=hwMusaClp01pcr, hwMusaEndOfMib=hwMusaEndOfMib, hwMusaNmsParaConfEntry=hwMusaNmsParaConfEntry, hwMusaResetSlaveMMX=hwMusaResetSlaveMMX, hwMusaPatchCRC=hwMusaPatchCRC, 
hwMusaShelfConfEntry=hwMusaShelfConfEntry, hwMusaTrafficIndex=hwMusaTrafficIndex, hwMusaUpStreamTrafficTx=hwMusaUpStreamTrafficTx, hwMusaSlotIpAddress=hwMusaSlotIpAddress, hwMusaShelfName=hwMusaShelfName, hwMusaPvcTrafficStatisTable=hwMusaPvcTrafficStatisTable, hwMusaSrcBoardVpi=hwMusaSrcBoardVpi, hwMusaSlotConfEntry=hwMusaSlotConfEntry, hwMusaTrafficEntry=hwMusaTrafficEntry, hwMusaToLanTrafficId=hwMusaToLanTrafficId, hwMusaUpStreamTrafficRx=hwMusaUpStreamTrafficRx, hwMusaTrafficType=hwMusaTrafficType, hwMusaDestFrcActiveStatus=hwMusaDestFrcActiveStatus, hwMusaMbs=hwMusaMbs, hwMusaNmsName=hwMusaNmsName, hwMusaQueryCurReceiveCellNum=hwMusaQueryCurReceiveCellNum, DisplayString=DisplayString, hwMusaFrameUsedBandWidth=hwMusaFrameUsedBandWidth, hwMusaResetSys=hwMusaResetSys, hwMusaFrameName=hwMusaFrameName, hwMusaQueryCurBIP3=hwMusaQueryCurBIP3, hwMusaPatchOperateTable=hwMusaPatchOperateTable, hwMusaFrameBandWidth=hwMusaFrameBandWidth, hwMusaRejectEndIp=hwMusaRejectEndIp, hwMusaTrafficRtvbrScrTable=hwMusaTrafficRtvbrScrTable, hwMusaOimPhyTable=hwMusaOimPhyTable, hwMusaGetCommunity=hwMusaGetCommunity, hwMusaNmsSarVci=hwMusaNmsSarVci, hwMusaIpRejectTableId=hwMusaIpRejectTableId, hwMusaAtmIpAddr=hwMusaAtmIpAddr, hwMusaAdlVci=hwMusaAdlVci, hwMusaPatchCodeLength=hwMusaPatchCodeLength, hwMusaLoadRateTable=hwMusaLoadRateTable, hwMusaNmsOperState=hwMusaNmsOperState, hwMusaSlotCardOperStatus=hwMusaSlotCardOperStatus, hwMusaSlotIndex=hwMusaSlotIndex, hwMusaDestFrcIwfType=hwMusaDestFrcIwfType, hwMusaPatchCodeAddress=hwMusaPatchCodeAddress, hwMusaLoadRate=hwMusaLoadRate, hwMusaShelfIndex=hwMusaShelfIndex, hwMusaTrafficRtvbrScrEntry=hwMusaTrafficRtvbrScrEntry, hwMusaSrcBoardVci=hwMusaSrcBoardVci, hwMusaCurUsedUpBandWidth=hwMusaCurUsedUpBandWidth, hwMusaSysCpuRatio=hwMusaSysCpuRatio, hwMusaSrcCescV35N=hwMusaSrcCescV35N, hwMusaPatchType=hwMusaPatchType, hwMusaDstIpMask=hwMusaDstIpMask, hwMusaCbrPcrIndex=hwMusaCbrPcrIndex, hwMusaDestBoardVci=hwMusaDestBoardVci, 
hwMusaIpMask=hwMusaIpMask, hwMusaDnOpticMainBandWidth=hwMusaDnOpticMainBandWidth, hwMusaPvcCidEntry=hwMusaPvcCidEntry, hwMusaLoadFileName=hwMusaLoadFileName, hwMusaOpticBandwidthEntry=hwMusaOpticBandwidthEntry, hwMusaSetLineLoop=hwMusaSetLineLoop, hwMusaUpOpticMainBandWidth=hwMusaUpOpticMainBandWidth, hwMusaTrafficTable=hwMusaTrafficTable, hwMusaWarningCtrlEntry=hwMusaWarningCtrlEntry, hwMusaNmsRelayVci=hwMusaNmsRelayVci, hwMusaDownPracticalBandWidth=hwMusaDownPracticalBandWidth, hwMusaMtu=hwMusaMtu, hwMusaDestBoardVpi=hwMusaDestBoardVpi, hwMusaPatchOperateEntry=hwMusaPatchOperateEntry, hwMusaShelfType=hwMusaShelfType, hwMusaDownReservedBandWidth=hwMusaDownReservedBandWidth, hwMusaInsertLOS=hwMusaInsertLOS, hwMusaQueryCurLOCDNum=hwMusaQueryCurLOCDNum, hwMusaTypeOfPvcPvp=hwMusaTypeOfPvcPvp, hwMusaNmsPvcConfEntry=hwMusaNmsPvcConfEntry, hwMusaPatchState=hwMusaPatchState, hwMusaPvcPvpState=hwMusaPvcPvpState, hwMusaDevice=hwMusaDevice, hwMusaPermitIpMask=hwMusaPermitIpMask, hwMusaClp01scr=hwMusaClp01scr, hwMusaClp0scr=hwMusaClp0scr, hwMusaRejectIpMask=hwMusaRejectIpMask, hwMusaIpAddrPermitEntry=hwMusaIpAddrPermitEntry, hwMusaSlot=hwMusaSlot, hwMusaCampusPvcConfEntry=hwMusaCampusPvcConfEntry, hwMusaRtvbrScrIndex=hwMusaRtvbrScrIndex, hwMusaWarn24HourThreshold=hwMusaWarn24HourThreshold, hwMusaOperat=hwMusaOperat, hwMusaCampusPvcConfTable=hwMusaCampusPvcConfTable, hwMusaQueryCurNonCorrectHECNum=hwMusaQueryCurNonCorrectHECNum, hwMusaUpReservedBandWidth=hwMusaUpReservedBandWidth, hwMusaAdlPortOperat=hwMusaAdlPortOperat, hwMusaDownStreamTrafficRx=hwMusaDownStreamTrafficRx, hwMusaSysMib=hwMusaSysMib, hwMusaDstIp=hwMusaDstIp, hwMusaSetCommunity=hwMusaSetCommunity, hwMusaQueryMemory=hwMusaQueryMemory, hwMusaCDVT=hwMusaCDVT, hwMusaInsertBIP3=hwMusaInsertBIP3, hwMusaSlotUsedDownBandWidth=hwMusaSlotUsedDownBandWidth, hwMusaAdlSlotId=hwMusaAdlSlotId, hwMusaSlotCardSerial=hwMusaSlotCardSerial, hwMusaHostVersion=hwMusaHostVersion, hwMusaSlotConfTable=hwMusaSlotConfTable, 
hwMusaSlotUpBandWidth=hwMusaSlotUpBandWidth, hwMusaSlotUsedUpBandWidth=hwMusaSlotUsedUpBandWidth, hwMusaFrameNumbers=hwMusaFrameNumbers, hwMusaOimOpticTable=hwMusaOimOpticTable, hwMusaSysRouteTable=hwMusaSysRouteTable, hwMusaDestOnuId=hwMusaDestOnuId, hwMusaCbrPcrValue=hwMusaCbrPcrValue, hwMusaPatchLoadFilename=hwMusaPatchLoadFilename, hwMusaSrcCescFillDegree=hwMusaSrcCescFillDegree, hwMusaSrcApcConnectAttribute=hwMusaSrcApcConnectAttribute, hwMusaCidIndex=hwMusaCidIndex, hwMuasSrcSlotId=hwMuasSrcSlotId, hwMusaMacAddr=hwMusaMacAddr, hwMusaRtvbrScrValue=hwMusaRtvbrScrValue, hwMusaNmsStyle=hwMusaNmsStyle, hwMusaSlotDescript=hwMusaSlotDescript, hwMusaClearOIMErrEventStat=hwMusaClearOIMErrEventStat, hwMusaSlotCardType=hwMusaSlotCardType, hwMusaServiceClass=hwMusaServiceClass, hwMusaCbrPcrRefCount=hwMusaCbrPcrRefCount, hwMusaSrcFrcDlciType=hwMusaSrcFrcDlciType, hwMusaDestCescChannelBitmap=hwMusaDestCescChannelBitmap, hwMusaCpuOccupyRate=hwMusaCpuOccupyRate, hwMusaPermitEndIp=hwMusaPermitEndIp, hwMusaNmsPortVlanId=hwMusaNmsPortVlanId, hwMusaWarningIsCount=hwMusaWarningIsCount, hwMusaSysRouteOper=hwMusaSysRouteOper, hwMusaDestPortType=hwMusaDestPortType, hwMusaDestCescV35N=hwMusaDestCescV35N, hwMusaDestApcConnectAttribute=hwMusaDestApcConnectAttribute, hwMusaPermitBeginIp=hwMusaPermitBeginIp, hwMusaNmsENCAP=hwMusaNmsENCAP, hwMusaWarningEngDesc=hwMusaWarningEngDesc, hwMusaPatchIdIndex=hwMusaPatchIdIndex, hwMusaAllPvcConfEntry=hwMusaAllPvcConfEntry, hwMusaPvcCid=hwMusaPvcCid, hwMusaCurUsedDownBandWidth=hwMusaCurUsedDownBandWidth, hwMusaNextTrafficIndex=hwMusaNextTrafficIndex, hwMusaInsertPAIS=hwMusaInsertPAIS, hwMusaSrcOnuId=hwMusaSrcOnuId, hwMusaPatchEntry=hwMusaPatchEntry, hwMusaPatchDataLength=hwMusaPatchDataLength, hwMusaTrafficCbrPcrTable=hwMusaTrafficCbrPcrTable, hwMusaInsertLAIS=hwMusaInsertLAIS, hwMusaPatchDataAddress=hwMusaPatchDataAddress, hwMa5100EndOfMib=hwMa5100EndOfMib, hwMusaRefCount=hwMusaRefCount, hwMusaPatchTable=hwMusaPatchTable, 
hwMusaLoadOperType=hwMusaLoadOperType, hwMusaCellbusID=hwMusaCellbusID, hwMusaNmsFrameId=hwMusaNmsFrameId, hwMusaNmsLLCVC=hwMusaNmsLLCVC, hwMusaNmsStatus=hwMusaNmsStatus, hwMusaFrameConfEntry=hwMusaFrameConfEntry, hwMusaSrcCescChannelBitmap=hwMusaSrcCescChannelBitmap, hwMusaSysDate=hwMusaSysDate, hwMusaQueryCurUnmatchCellNum=hwMusaQueryCurUnmatchCellNum, hwMusaNmsRelayVpi=hwMusaNmsRelayVpi, hwMusaDestPortVlanVccId=hwMusaDestPortVlanVccId, hwMusaGetSetPort=hwMusaGetSetPort, hwMusaSrcToDestTraffic=hwMusaSrcToDestTraffic)
# Generated pysnmp export: publishes the remaining HUAWEI-MUSA-MA5100-MIB
# object identities so that other MIB modules can import them by name.
mibBuilder.exportSymbols("HUAWEI-MUSA-MA5100-MIB", hwMusaIpAddrRejectTable=hwMusaIpAddrRejectTable, hwMusaShelfConfTable=hwMusaShelfConfTable, hwMusaGatewayIpAddr=hwMusaGatewayIpAddr, hwMusaSetSrcLoop=hwMusaSetSrcLoop, hwMusaAdlPortCount=hwMusaAdlPortCount, hwMusaWarningTerminalCtrl=hwMusaWarningTerminalCtrl, hwMusaOimPhyEntry=hwMusaOimPhyEntry, hwMusaDestFrcFreeBandwidth=hwMusaDestFrcFreeBandwidth, hwMusaQueryCurPFEBE=hwMusaQueryCurPFEBE, hwMusaClp0pcr=hwMusaClp0pcr, hwMusaNmsSlotId=hwMusaNmsSlotId, hwMusaLoadContent=hwMusaLoadContent, hwMusaDestCescChannelId=hwMusaDestCescChannelId, hwMusaNmsPvcOper=hwMusaNmsPvcOper, hwMusaFrame=hwMusaFrame, hwMusaQueryCurBIP2=hwMusaQueryCurBIP2, hwMusaAdlFrameId=hwMusaAdlFrameId, hwMusaSlotDownBandWidth=hwMusaSlotDownBandWidth, hwMusaSrcFrcFreeBandwidth=hwMusaSrcFrcFreeBandwidth, hwMusaInsertBIP2=hwMusaInsertBIP2, hwMusaDestCescFillDegree=hwMusaDestCescFillDegree, hwMusaBiosVersion=hwMusaBiosVersion, hwMusaLoadRateEntry=hwMusaLoadRateEntry, hwMusaTrapPort=hwMusaTrapPort, hwMusaQueryCurSendCellNum=hwMusaQueryCurSendCellNum)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
1c2e4e3b2accda8bfab8fd701fa489bb64d77a17 | 75132feadcd75b3ac76ca9eca608a59ae5b57e1d | /class_challenge.py | dd216cbaf2c977c8772334ea47665cc3ec072976 | [] | no_license | clutchmandd/Python-Projects | bcb4fb1f4f32e7c4a8062ddd94357bd6d96f7fa5 | e1ce8ce920547317b8a721e8216a1c67926f4c87 | refs/heads/main | 2023-08-19T05:44:08.129749 | 2021-10-08T16:26:20 | 2021-10-08T16:26:20 | 400,896,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | # Creating a Class using the __init__() function
class Person:
    """A person identified by first name, last name, and age."""

    def __init__(self, fname, lname, age):
        # Bind all three fields in one unpacking assignment.
        self.fname, self.lname, self.age = fname, lname, age

    def myfunc(self):
        # Greet using the stored names; output text matches the original exactly.
        print('Hello my name is ' + self.fname + ' ' + self.lname + '.')
# Creating an instance and exercising its greeting method.
p1 = Person('David','Dixon',35)
# Prints: Hello my name is David Dixon.
p1.myfunc()
| [
"clutchmandd@gmail.com"
] | clutchmandd@gmail.com |
850ade5eeed22d497b51edf7a8f5ccd3b3049007 | 61efd764ae4586b6b2ee5e6e2c255079e2b01cfc | /azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/metric_specification.py | a86668fcff8d8d4acf56738739afb3ab74282378 | [
"MIT"
] | permissive | AutorestCI/azure-sdk-for-python | a3642f53b5bf79d1dbb77851ec56f4cc0c5b3b61 | 60b0726619ce9d7baca41f6cd38f741d74c4e54a | refs/heads/master | 2021-01-21T02:23:59.207091 | 2018-01-31T21:31:27 | 2018-01-31T21:31:27 | 55,251,306 | 4 | 3 | null | 2017-11-13T17:57:46 | 2016-04-01T17:48:48 | Python | UTF-8 | Python | false | false | 4,163 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: AutoRest-generated model (see file header) — manual edits are lost
# when the SDK is regenerated, so only documentation is added here.
class MetricSpecification(Model):
    """Description of metrics specification.

    :param name: The name of the metric.
    :type name: str
    :param display_name: The display name of the metric.
    :type display_name: str
    :param display_description: The description of the metric.
    :type display_description: str
    :param unit: Units the metric to be displayed in.
    :type unit: str
    :param aggregation_type: The aggregation type.
    :type aggregation_type: str
    :param availabilities: List of availability.
    :type availabilities:
     list[~azure.mgmt.network.v2017_10_01.models.Availability]
    :param enable_regional_mdm_account: Whether regional MDM account enabled.
    :type enable_regional_mdm_account: bool
    :param fill_gap_with_zero: Whether gaps would be filled with zeros.
    :type fill_gap_with_zero: bool
    :param metric_filter_pattern: Pattern for the filter of the metric.
    :type metric_filter_pattern: str
    :param dimensions: List of dimensions.
    :type dimensions: list[~azure.mgmt.network.v2017_10_01.models.Dimension]
    :param is_internal: Whether the metric is internal.
    :type is_internal: bool
    :param source_mdm_account: The source MDM account.
    :type source_mdm_account: str
    :param source_mdm_namespace: The source MDM namespace.
    :type source_mdm_namespace: str
    :param resource_id_dimension_name_override: The resource Id dimension name
     override.
    :type resource_id_dimension_name_override: str
    """

    # Maps each Python attribute to its JSON wire key and msrest type tag;
    # the Model base class consumes this table for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
        'availabilities': {'key': 'availabilities', 'type': '[Availability]'},
        'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
        'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
        'metric_filter_pattern': {'key': 'metricFilterPattern', 'type': 'str'},
        'dimensions': {'key': 'dimensions', 'type': '[Dimension]'},
        'is_internal': {'key': 'isInternal', 'type': 'bool'},
        'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
        'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
        'resource_id_dimension_name_override': {'key': 'resourceIdDimensionNameOverride', 'type': 'str'},
    }

    def __init__(self, name=None, display_name=None, display_description=None, unit=None, aggregation_type=None, availabilities=None, enable_regional_mdm_account=None, fill_gap_with_zero=None, metric_filter_pattern=None, dimensions=None, is_internal=None, source_mdm_account=None, source_mdm_namespace=None, resource_id_dimension_name_override=None):
        # Pure data container: each keyword argument is stored on the
        # instance unchanged; no validation is performed here.
        super(MetricSpecification, self).__init__()
        self.name = name
        self.display_name = display_name
        self.display_description = display_description
        self.unit = unit
        self.aggregation_type = aggregation_type
        self.availabilities = availabilities
        self.enable_regional_mdm_account = enable_regional_mdm_account
        self.fill_gap_with_zero = fill_gap_with_zero
        self.metric_filter_pattern = metric_filter_pattern
        self.dimensions = dimensions
        self.is_internal = is_internal
        self.source_mdm_account = source_mdm_account
        self.source_mdm_namespace = source_mdm_namespace
        self.resource_id_dimension_name_override = resource_id_dimension_name_override
| [
"laurent.mazuel@gmail.com"
] | laurent.mazuel@gmail.com |
2d579021b7824811d1c1632aaf56eda54eeaed4b | 1d833572db4b65799c3de4ceebb09740d6306729 | /QueueServer.py | 5d4e9d3ec2fb1dc016d231d793a7174d890e864c | [] | no_license | Kishi115/Systems-Modelling-and-Simulation | 31231b77cff0398de30b3c8fc86add7a550d9ea4 | c61bd42983e1266f1eed686b1e12fc4cc42d73d7 | refs/heads/master | 2020-12-21T00:08:18.667541 | 2020-01-26T01:04:56 | 2020-01-26T01:04:56 | 236,251,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | import random
from tabulate import tabulate
# no. of arrivals (number of simulated customers)
n = 100000
# column headers — index 0 is seeded by hand: customer 0 arrives at t=1
# with zero wait and is served immediately.
interval = [random.randint(0, 10)]            # inter-arrival time, uniform 0..10
arriveTime = [1]                              # absolute arrival time
waitTime = [0]                                # time spent queueing
timeAtServe = [1]                             # time service begins
serveTime = [random.randint(1, 5)]            # service duration, uniform 1..5
departTime = [timeAtServe[0] + serveTime[0]]  # time service ends
queueLine = [0]                               # number in system seen at arrival
# average wait and departure times, and average queue length (running means)
avW = [0]
avD = [departTime[0]]
avQ = [0]
table = []
# running sums backing the running means above
sumW = 0
sumD = departTime[0]
sumQ = 0
for x in range(1, n):
    interval.append(random.randint(0, 10))
    arriveTime.append(arriveTime[x-1] + interval[x])
    # Single server, FIFO: wait until the previous customer departs, if later.
    waitTime.append(max(0, departTime[x-1] - arriveTime[x]))
    timeAtServe.append(arriveTime[x] + waitTime[x])
    serveTime.append(random.randint(1, 5))
    departTime.append(timeAtServe[x] + serveTime[x])
    # minus index of first person who departed after current arrival
    # from index of current arrival
    # NOTE(review): this inner scan makes the whole loop O(n^2); a forward-
    # moving pointer would give the same numbers in O(n) — confirm before
    # changing.
    for y in range(0, x + 1):
        if(departTime[y] > arriveTime[x]):
            queueLine.append(x - y)
            break
    sumW = sumW + waitTime[x]
    sumD = sumD + departTime[x]
    sumQ = sumQ + queueLine[x]
    # NOTE(review): x+1 customers (indices 0..x) have contributed to the sums,
    # but the means divide by x — presumably customer 0 is meant to be
    # excluded; verify, otherwise divide by (x + 1).
    avW.append(sumW/x)
    avD.append(sumD/x)
    avQ.append(sumQ/x)
# formatting into ascii table
for x in range(n):
    table.append([interval[x], arriveTime[x], waitTime[x], timeAtServe[x], serveTime[x], departTime[x], queueLine[x], avW[x], avD[x], avQ[x]])
print (tabulate(table, headers=["i", "at", "wt", "tas", "st", "dt", "ql", "avW", "avD", "avQ"]))
| [
"noreply@github.com"
] | noreply@github.com |
0275ec02c5108085fde4a67dcf25fbc8dba2da0b | 40babe7363262beb13d1543ed41dab005b222e71 | /radflix/serializers.py | c667898e58e34d09fbd24be8389685b61369eee0 | [] | no_license | Jouskar/Django-Backend | bca166c087beb0b7e7662176439aee8bba36490a | bc511be07ba91459079e297c53a16e15c5b8ca31 | refs/heads/master | 2023-01-06T06:23:50.119400 | 2020-11-09T09:43:27 | 2020-11-09T09:43:27 | 311,287,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | from rest_framework import serializers
from radflix.models import Movie
class MovieSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every stored column of the Movie model."""

    class Meta:
        # Back the serializer with the Movie model; the tuple below both
        # whitelists the exposed fields and fixes their output order.
        model = Movie
        fields = ('id',
                  'show_id',
                  'type',
                  'title',
                  'director',
                  'cast',
                  'country',
                  'date_added',
                  'release_year',
                  'rating',
                  'duration',
                  'listed_in',
                  'description')
| [
"61375991+Jouskar@users.noreply.github.com"
] | 61375991+Jouskar@users.noreply.github.com |
83f325539952c23909157086bbb01b3725047fbd | d60f13e52d385fd7f839ee441d8df05b34f8c75b | /wirecell/test/__main__.py | ebc11006d8d4b16dfbd3ebe5983fdcf478ad5421 | [] | no_license | wenqiang-gu/wire-cell-python | 07fe7ac420fedf747e97ba424052e85222316234 | 981541f5618b94d55ee5f07c6eeff6fbbfa5fa93 | refs/heads/master | 2022-10-02T08:51:27.193403 | 2022-07-29T16:23:42 | 2022-07-29T16:23:42 | 228,528,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | import math
import click
from wirecell.util import ario, plottools
@click.group("test")
@click.pass_context
def cli(ctx):
    '''
    Wire Cell Test Commands
    '''
    # Group callback does no work itself; subcommands (e.g. "plot") attach
    # to this group and carry the behavior.
@cli.command("plot")
@click.option("-n", "--name", default="noise",
help="The test name")
@click.argument("datafile")
@click.argument("output")
@click.pass_context
def plot(ctx, name, datafile, output):
'''
Make plots from file made by test_<test>.
'''
from importlib import import_module
mod = import_module(f'wirecell.test.{name}')
fp = ario.load(datafile)
with plottools.pages(output) as out:
mod.plot(fp, out)
def main():
    # Console-script entry point: run the click group with an empty
    # context object.
    cli(obj={})
# Run the CLI when executed directly (python -m wirecell.test).
if __name__ == '__main__':
    main()
| [
"brett.viren@gmail.com"
] | brett.viren@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.