blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
376168fe008d4a2ec6337a3ab692cb9eca743790 | 296a60e4ca32f326dde269fd6eec5a5e0c8f743c | /site_app/migrations/0004_post.py | 9998a41892458ea6a4abda1fa20d0d3a08ba1e7a | [] | no_license | Najafova/company_introduction_landingpage1 | 758790ae4a1565f65b2714db000c2dac7c3128b1 | bb3d212a2057605f90fa5881804acaecfaf61788 | refs/heads/master | 2022-12-01T09:46:36.251346 | 2020-03-19T14:33:11 | 2020-03-19T14:33:11 | 248,525,225 | 0 | 0 | null | 2022-11-22T03:38:32 | 2020-03-19T14:32:56 | JavaScript | UTF-8 | Python | false | false | 845 | py | # Generated by Django 2.1.7 on 2019-02-18 07:44
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the ``Post`` model for ``site_app``."""
    # Must be applied after the team-image migration.
    dependencies = [
        ('site_app', '0003_team_image'),
    ]
    operations = [
        # All content columns are optional (blank/null) so posts can be
        # created empty and filled in later via the admin.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=255, null=True)),
                # Rich-text field provided by django-ckeditor.
                ('description', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('body', models.TextField(blank=True, null=True)),
                # Manual ordering key; NULL when unspecified.
                ('order', models.IntegerField(blank=True, null=True)),
                ('slug', models.SlugField(blank=True, default='')),
            ],
        ),
    ]
| [
"gulnarnecefova1996@gmail.com"
] | gulnarnecefova1996@gmail.com |
3f1f6c50edaa6b7434171b31a68951df9f9c29e8 | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/django/conf/locale/th/formats.py | d7394eb69c315129df90d1aeeee0ca61a9a79231 | [
"MIT",
"GPL-3.0-only"
] | permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 1,072 | py | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j F Y, G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
SHORT_DATETIME_FORMAT = 'j M Y, G:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', # 25/10/2006
'%d %b %Y', # 25 ต.ค. 2006
'%d %B %Y', # 25 ตุลาคม 2006
]
TIME_INPUT_FORMATS = [
'%H:%M:%S', # 14:30:59
'%H:%M:%S.%f', # 14:30:59.000200
'%H:%M', # 14:30
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # 25/10/2006 14:30:59
'%d/%m/%Y %H:%M:%S.%f', # 25/10/2006 14:30:59.000200
'%d/%m/%Y %H:%M', # 25/10/2006 14:30
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| [
"rizwansoaib@gmail.com"
] | rizwansoaib@gmail.com |
9ed93ded054e5409a3857b0612a1e109381b47cb | 771fc415b752721b89d418a510eb3e6a447a075c | /sitl_config/usv/vrx_gazebo/nodes/quat2rpy.py | 59a0a59de70be5baa82f09ba62ad55eae7cc6657 | [
"MIT"
] | permissive | robin-shaun/XTDrone | ebf8948ed44bf8fc796263a18aef8a985ac00e12 | 6ee0a3033b5eed8fcfddc9b3bfee7a4fb26c79db | refs/heads/master | 2023-09-02T10:49:03.888060 | 2023-07-16T13:02:07 | 2023-07-16T13:02:07 | 248,799,809 | 815 | 182 | null | 2020-09-17T12:50:46 | 2020-03-20T16:16:52 | Python | UTF-8 | Python | false | false | 3,454 | py | #!/usr/bin/env python
'''
Node to convert from quaternions to rpy in various ROS messages
'''
import sys

import rospy
import tf
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseArray
from sensor_msgs.msg import Imu
from gazebo_msgs.msg import ModelStates
from nav_msgs.msg import Odometry
class Node(object):
    """Converts the quaternion in incoming ROS messages to roll/pitch/yaw.

    Depending on configuration the callback extracts a Pose from a
    PoseArray (``pose_index``), from gazebo ModelStates (``model_name``),
    uses the message directly (Pose/Imu), or takes the nested pose of an
    Odometry message, then publishes the Euler angles as a Vector3.
    """
    def __init__(self, pose_index=None, model_name=None,
                 input_msg_type='Pose'):
        self.pubmsg = None   # Vector3 message reused for every publish
        self.pub = None      # rospy.Publisher, assigned by the caller
        self.pose_index = pose_index
        self.model_name = model_name
        self.input_msg_type = input_msg_type
    def callback(self, data):
        """Extract the orientation from *data*, convert to RPY and publish."""
        if self.pose_index is not None:
            # PoseArray: pick the configured pose out of the array.
            # BUGFIX: was ``data[pose_index]`` using a module-level global;
            # a PoseArray is not indexable -- its poses live in ``.poses``.
            data = data.poses[self.pose_index]
        elif self.model_name is not None:
            # ModelStates: look up the model by name (parallel name/pose lists).
            # BUGFIX: was the module-level global ``model_name``.
            try:
                index = data.name.index(self.model_name)
            except ValueError:
                rospy.logwarn_throttle(10.0, 'Model state {} not found'.format(self.model_name))
                return
            data = data.pose[index]
        elif self.input_msg_type in ('Pose', 'Imu'):
            # Both message types carry .orientation at the top level.
            pass
        elif self.input_msg_type == 'Odometry':
            data = data.pose.pose
        else:
            rospy.logerr("Don't know what to do with message type %s"%
                         self.input_msg_type)
            sys.exit()
        q = (data.orientation.x,
             data.orientation.y,
             data.orientation.z,
             data.orientation.w)
        euler = tf.transformations.euler_from_quaternion(q)
        self.pubmsg.x = euler[0]
        self.pubmsg.y = euler[1]
        self.pubmsg.z = euler[2]
        rospy.logdebug("publishing rpy: %.2f, %.2f, %.2f"
                       %(euler[0],euler[1],euler[2]))
        self.pub.publish(self.pubmsg)
if __name__ == '__main__':
    rospy.init_node('quat2rpy', anonymous=True)
    # ROS Parameters
    # NOTE(review): topic names are hard-coded; presumably remapped via the
    # launch file -- confirm.
    in_topic = 'in_topic'
    out_topic = 'out_topic'
    # NOTE(review): pose_index is later formatted with %d, so it must arrive
    # from the parameter server as an int -- confirm launch-file typing.
    pose_index = rospy.get_param('~pose_index',None)
    model_name = rospy.get_param('~model_name',None)
    inmsgtype = rospy.get_param('~input_msg_type','Pose')
    # Initiate node object
    node=Node(pose_index, model_name, input_msg_type=inmsgtype)
    node.pubmsg = Vector3()
    # Setup publisher
    node.pub = rospy.Publisher(out_topic,Vector3,queue_size=10)
    # Subscriber: model_name takes precedence over pose_index, which takes
    # precedence over the plain input_msg_type setting.
    if (not(model_name == None)):
        inmsgtype = 'ModelStates[%s]'% model_name
        rospy.Subscriber(in_topic,ModelStates,node.callback)
    elif (not (pose_index == None)):
        inmsgtype = 'PoseArray[%d]'%pose_index
        # Setup subscriber
        rospy.Subscriber(in_topic,PoseArray,node.callback)
    else:
        if inmsgtype == 'Pose':
            # Setup subscriber
            rospy.Subscriber(in_topic,Pose,node.callback)
        elif inmsgtype == 'Imu':
            rospy.Subscriber(in_topic,Imu,node.callback)
        elif inmsgtype == 'Odometry':
            rospy.Subscriber(in_topic,Odometry,node.callback)
        else:
            rospy.logerr("I don't know how to deal with message type <%s>"%
                         inmsgtype)
            sys.exit()
    rospy.loginfo("Subscribing to %s, looking for %s messages."%
                  (in_topic,inmsgtype))
    rospy.loginfo("Publishing to %s, sending Vector3 messages"%
                  (out_topic))
    try:
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
| [
"robin_shaun@foxmail.com"
] | robin_shaun@foxmail.com |
199ccf442b1d9f93404fd9f73e43ceb2a301511c | e2b134234cf37a6d2ac18289d96daeec8388d479 | /srvres/__init__.py | 346eef6e77ebe70eae0f8976feade5314994fdd2 | [
"MIT"
] | permissive | MineRobber9000/srvres | d8f18fbeceedda0562efe8586330be84b14ba054 | ae006c910c9cf32952129a2de92765b973400993 | refs/heads/master | 2023-05-30T18:06:15.537464 | 2021-06-07T09:43:15 | 2021-06-07T09:43:15 | 374,605,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | from srvres.resolver import SRVResolver
| [
"khuxkm@ttm.sh"
] | khuxkm@ttm.sh |
9b6478662fe7153eccd5d4eb415af97ac57aec2b | f674baa2f4ca0ac0ab856bd2ae3df6317278faf8 | /message/migrations/0001_initial.py | d07d4ae40786a92748a0dcb91691844b265dca88 | [] | no_license | youichiro/nutfes-staff-app | 3d6e869cbc3f35c78f7ef1230fae3e075de4d445 | f3aed87b0fc27f99591f135aa2b73f54029c5560 | refs/heads/master | 2020-03-26T18:48:54.117252 | 2018-09-16T03:06:38 | 2018-09-16T03:06:38 | 145,232,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,681 | py | # Generated by Django 2.0.7 on 2018-08-25 09:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the ``message`` app: Message and Reply tables."""
    # First migration of this app.
    initial = True
    # Depends on whichever user model the project configures (AUTH_USER_MODEL).
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Messages authored by a user; PROTECT prevents deleting a user
        # who still owns messages.
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('text', models.TextField(max_length=1000)),
                ('importance', models.CharField(max_length=10)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'messages',
            },
        ),
        # Replies reference both their parent message and their author.
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('text', models.TextField(max_length=1000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='message.Message')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'replies',
            },
        ),
    ]
| [
"cinnamon416@gmail.com"
] | cinnamon416@gmail.com |
6d5cfbd9dfb220db9c0d966b0284b9e5bddfc680 | a5c4ea16042a8078e360c32636c00e3163ac99a8 | /ImagenetBundle/chapter09-squeezenet/pyimagesearch/nn/mxconv/mxsqueezenet.py | d176140b5694953ac7b3adf83c600d3e497b6c13 | [] | no_license | lykhahaha/Mine | 3b74571b116f72ee17721038ca4c58796610cedd | 1439e7b161a7cd612b0d6fa4403b4c8c61648060 | refs/heads/master | 2020-07-15T05:16:13.808047 | 2019-06-01T07:30:01 | 2019-06-01T07:30:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,000 | py | imprt mxnet as mx
class MxSqueezeNet:
    """SqueezeNet (Iandola et al., 2016) expressed with mxnet symbols.

    NOTE(review): the module-level import line of the original file reads
    ``imprt mxnet as mx`` (typo); the module cannot import as written.
    """

    @staticmethod
    def squeeze(data, num_filter):
        """Squeeze stage of a FIRE module: 1x1 convolutions followed by ELU."""
        conv_1x1 = mx.sym.Convolution(data=data, kernel=(1, 1), num_filter=num_filter)
        act_1x1 = mx.sym.LeakyReLU(data=conv_1x1, act_type='elu')
        return act_1x1

    @staticmethod
    def fire(data, num_squeeze_filter, num_expand_filter):
        """FIRE module: 1x1 squeeze feeding parallel 1x1 and 3x3 expands."""
        # construct 1x1 squeeze followed by 1x1 expand
        squeeze_1x1 = MxSqueezeNet.squeeze(data, num_squeeze_filter)
        expand_1x1 = mx.sym.Convolution(data=squeeze_1x1, kernel=(1, 1), num_filter=num_expand_filter)
        relu_expand_1x1 = mx.sym.LeakyReLU(data=expand_1x1, act_type='elu')
        # construct 3x3 expand (padded so spatial size matches the 1x1 path)
        expand_3x3 = mx.sym.Convolution(data=squeeze_1x1, kernel=(3, 3), pad=(1, 1), num_filter=num_expand_filter)
        relu_expand_3x3 = mx.sym.LeakyReLU(data=expand_3x3, act_type='elu')
        # the two expand paths are concatenated along the channel dimension
        output = mx.sym.Concat(relu_expand_1x1, relu_expand_3x3, dim=1)
        return output

    @staticmethod
    def build(classes):
        """Return the SqueezeNet symbol with *classes* output categories."""
        # data input
        data = mx.sym.Variable('data')
        # Block #1: Conv -> ReLU -> Pool
        conv_1 = mx.sym.Convolution(data=data, kernel=(7, 7), stride=(2, 2), num_filter=96)
        act_1 = mx.sym.LeakyReLU(data=conv_1, act_type='elu')
        pool_1 = mx.sym.Pooling(data=act_1, kernel=(3, 3), pool_type='max', stride=(2, 2))
        # Block #2-4: (FIRE * 3) -> Pool
        fire_2 = MxSqueezeNet.fire(pool_1, num_squeeze_filter=16, num_expand_filter=64)
        fire_3 = MxSqueezeNet.fire(fire_2, num_squeeze_filter=16, num_expand_filter=64)
        fire_4 = MxSqueezeNet.fire(fire_3, num_squeeze_filter=32, num_expand_filter=128)
        pool_4 = mx.sym.Pooling(data=fire_4, kernel=(3, 3), pool_type='max', stride=(2, 2))
        # Block #5-8 : (FIRE) * 4 -> Pool
        fire_5 = MxSqueezeNet.fire(pool_4, num_squeeze_filter=32, num_expand_filter=128)
        # BUGFIX: fire modules 6-8 previously re-used fire_2/fire_3 as their
        # inputs, disconnecting the network; each FIRE module must consume
        # the output of the previous one.
        fire_6 = MxSqueezeNet.fire(fire_5, num_squeeze_filter=48, num_expand_filter=192)
        fire_7 = MxSqueezeNet.fire(fire_6, num_squeeze_filter=48, num_expand_filter=192)
        fire_8 = MxSqueezeNet.fire(fire_7, num_squeeze_filter=64, num_expand_filter=256)
        pool_8 = mx.sym.Pooling(data=fire_8, kernel=(3, 3), pool_type='max', stride=(2, 2))
        # Last block: FIRE -> Dropout -> Conv -> ACT -> Pool
        fire_9 = MxSqueezeNet.fire(pool_8, num_squeeze_filter=64, num_expand_filter=256)
        do_9 = mx.sym.Dropout(data=fire_9, p=0.5)
        conv_10 = mx.sym.Convolution(data=do_9, kernel=(1, 1), num_filter=classes)
        act_10 = mx.sym.LeakyReLU(data=conv_10, act_type='elu')
        pool_10 = mx.sym.Pooling(data=act_10, kernel=(13, 13), pool_type='avg')
        # softmax classifier
        flatten = mx.sym.Flatten(data=pool_10)
        model = mx.sym.SoftmaxOutput(data=flatten, name='softmax')
return model | [
"ITITIU15033@student.hcmiu.edu.vn"
] | ITITIU15033@student.hcmiu.edu.vn |
1e38e86a6fe23e2fdf32b69d62392265587fe770 | e55480007fde8acea46fe8eeb3ee7193c25ba113 | /tests/test_ds/test_array_queue.py | 8d7bfd6083cf5899b1b3906928cf2765f343ec77 | [] | no_license | Annihilation7/Ds-and-Al | 80301bf543ec2eb4b3a9810f5fc25b0386847fd3 | a0bc5f5ef4a92c0e7a736dcff77df61d46b57409 | refs/heads/master | 2020-09-24T05:04:41.250051 | 2020-02-15T10:31:10 | 2020-02-15T10:31:10 | 225,669,366 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | import unittest
from src.ds.array_queue import ArrayQueue
class Test_ArrayQueue(unittest.TestCase):
    """Smoke test driving ArrayQueue with interleaved enqueues and dequeues."""
    def setUp(self) -> None:
        # fresh queue for every test method
        self.processer = ArrayQueue()
    def test_all(self):
        elems = [i for i in range(10)]
        for index, elem in enumerate(elems):
            # every third element (after the first) triggers a dequeue and a
            # printout instead of an enqueue, exercising both operations mixed
            if index != 0 and index % 3 == 0:
                self.processer.dequeue()
                self.processer.printQueue()
                continue
            self.processer.enqueue(elem)
if __name__ == '__main__':
unittest.main()
| [
"763366463@qq.com"
] | 763366463@qq.com |
9e623f48c4ded2745ca63c8797f9d295299357eb | c05357142b9f112d401a77f9610079be3500675d | /danceschool/core/cms_apps.py | 2f38fcd1bd17a22fd9ef29cfae7349cd05395ef0 | [
"BSD-3-Clause"
] | permissive | NorthIsUp/django-danceschool | b3df9a9373c08e51fcaa88751e325b6423f36bac | 71661830e87e45a3df949b026f446c481c8e8415 | refs/heads/master | 2021-01-02T22:42:17.608615 | 2017-08-04T17:27:37 | 2017-08-04T17:27:37 | 99,373,397 | 1 | 0 | null | 2017-08-04T19:21:50 | 2017-08-04T19:21:50 | null | UTF-8 | Python | false | false | 601 | py | from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class RegistrationApphook(CMSApp):
    """django CMS apphook mounting the registration URLconf on a CMS page."""
    name = _("Registration Apphook")
    def get_urls(self, page=None, language=None, **kwargs):
        # URL patterns are resolved lazily from the dotted module path.
        return ["danceschool.core.urls_registration"]
class AccountsApphook(CMSApp):
    """django CMS apphook mounting the accounts URLconf on a CMS page."""
    name = _("Accounts Apphook")
    def get_urls(self, page=None, language=None, **kwargs):
        # URL patterns are resolved lazily from the dotted module path.
        return ["danceschool.core.urls_accounts"]
apphook_pool.register(RegistrationApphook)
apphook_pool.register(AccountsApphook)
| [
"lee.c.tucker@gmail.com"
] | lee.c.tucker@gmail.com |
e7f97d55c16386e8fb5a231694dc710e02577ecf | 68528d13d49029074ad4a312c8c16c585f755914 | /scripts/average_filter.py | 8d3e8dc0e8ee6af46051aae803dbf24caff97490 | [
"MIT"
] | permissive | ShotaAk/pimouse_controller | 8132b3d95d6387aa5fae98b60d205f3179eaa62a | a7bbbd7109e582245cab0eaa017246fd44104474 | refs/heads/master | 2020-03-19T04:54:40.153634 | 2018-06-03T12:37:47 | 2018-06-03T12:37:47 | 135,881,207 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | #!/usr/bin/env python
#encoding: utf8
import numpy as np
import math
# 移動平均フィルタ
# 平均値を計算するための個数は引数sizeで指定する
class AverageFilter():
    """Fixed-size moving-average filter with an optional zero offset.

    The last ``size`` samples live in a ring buffer (zero-initialised, so
    the average ramps up until the buffer has been written once).  The
    reported value is the buffer mean minus the stored offset.
    """

    def __init__(self, size):
        # Guard against non-positive sizes: fall back to a 1-sample buffer.
        self._buffer_size = size if size > 0 else 1
        self._buffer = np.zeros(self._buffer_size, dtype=np.float32)
        self._current_index = 0
        self._filtered_value = 0
        self._offset_value = 0

    def update(self, value):
        """Insert *value* into the ring buffer and refresh the cached mean."""
        self._buffer[self._current_index] = value
        # Advance the write cursor, wrapping around at the end of the buffer.
        self._current_index = (self._current_index + 1) % self._buffer_size
        self._filtered_value = np.average(self._buffer)

    def get_value(self):
        """Return the current mean shifted by the stored offset."""
        return self._filtered_value - self._offset_value

    def offset(self):
        """Remember the current mean so future readings are relative to it."""
        self._offset_value = self._filtered_value
| [
"macakasit@gmail.com"
] | macakasit@gmail.com |
e6502be55f11f703cab1033ac7cf94a0df500f3c | ab97407d4f3c91142f2bed8e5c5415fbc9be8ac6 | /python/Add Two Numbers.py | 62baca112887e5abe3a9077ca8d65e68032a533c | [] | no_license | Lendfating/LeetCode | d328c68578d10d6cdba0c4dcb3359ddf085546e1 | 4cc492e04a7003839f07df93070fa9c19726a1c5 | refs/heads/master | 2020-12-30T09:26:20.461654 | 2015-02-01T08:41:46 | 2015-02-01T08:41:46 | 26,170,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
# soluction
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """LeetCode 2 -- Add Two Numbers.

    Each argument is a singly linked list whose nodes hold one decimal
    digit, least significant digit first; the result is their sum in the
    same representation.
    """
    # @return a ListNode
    def addTwoNumbers(self, l1, l2):
        """Sum two digit lists, handling the tails and final carry explicitly."""
        if l1 is None: return l2
        dump = ListNode(0)
        p, p1, p2, carry = dump, l1, l2, 0
        # Walk the common prefix of both lists.
        # BUGFIX: use floor division so the carry stays an int on Python 3
        # (the original Python 2 ``/`` would produce a float carry/digit).
        while p1 is not None and p2 is not None:
            carry, p.next = (p1.val+p2.val+carry)//10, ListNode((p1.val+p2.val+carry)%10)
            p, p1, p2 = p.next, p1.next, p2.next
        if p1 is not None:
            # Propagate the carry through the remainder of l1, then splice
            # the untouched tail back on.
            while p1 is not None and carry != 0:
                carry, p.next = (p1.val+carry)//10, ListNode((p1.val+carry)%10)
                p, p1 = p.next, p1.next
            p.next = p1
        elif p2 is not None:
            while p2 is not None and carry != 0:
                carry, p.next = (p2.val+carry)//10, ListNode((p2.val+carry)%10)
                p, p2 = p.next, p2.next
            p.next = p2
        if carry != 0:
            p.next = ListNode(carry)
        return dump.next
    # @return a ListNode
    def addTwoNumbers1(self, l1, l2):
        """Compact variant: one loop running while digits or a carry remain."""
        dump = ListNode(0)
        # ``acc`` holds digit sum plus carry (renamed from ``sum`` to avoid
        # shadowing the builtin).
        p, p1, p2, acc = dump, l1, l2, 0
        while p1 is not None or p2 is not None or acc != 0:
            acc += (p1.val if p1 is not None else 0) + (p2.val if p2 is not None else 0)
            p.next = ListNode(acc % 10)
            acc //= 10
            p = p.next
            p1 = p1.next if p1 is not None else None
            p2 = p2.next if p2 is not None else None
        return dump.next
if __name__ == '__main__':
pass
| [
"lizhen19900409@126.com"
] | lizhen19900409@126.com |
c119089df301b537c5f9324aa655dd8ff843e348 | 08db28fa3836c36433aa105883a762396d4883c6 | /spider/learning/day_04/07_requests_get.py | 67509a6a7a53277ba40f23333df8b8ff888dea59 | [] | no_license | xieyipeng/FaceRecognition | 1127aaff0dd121319a8652abcfe8a59a7beaaf43 | dede5b181d6b70b87ccf00052df8056a912eff0f | refs/heads/master | 2022-09-19T07:02:33.624410 | 2020-06-02T03:03:58 | 2020-06-02T03:03:58 | 246,464,586 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import requests
# Query parameters are URL-encoded automatically by requests.
# url = 'https://www.baidu.com/s?ie=UTF-8&wd=美女'
url = 'http://www.baidu.com/s'
params = {
    "wd": "美女",
    "ie": "utf-8"
}
# Spoof a desktop browser User-Agent so the server returns the full page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'
}
# response = requests.get(url, headers=headers)
response = requests.get(url, headers=headers, params=params)
data = response.content.decode('utf-8')
# Save the fetched HTML for offline inspection.
with open('07requests_get.html', 'w', encoding='utf-8')as f:
    f.write(data)
# To send a POST request with parameters instead:
# requests.post(url, data=(form dict), json=(json dict))
| [
"3239202719@qq.com"
] | 3239202719@qq.com |
53c144744fd35855616f66ff8a7662edfe04d507 | ec126ba7180f687d310a56f100d38f717ac1702a | /calc final.py | de96e14d474516de570ed2ef6a5efd68bdf5558d | [] | no_license | Sahil4UI/PythonJan3-4AfternoonRegular2021 | 0cb0f14b964125510f253a38a1b9af30024e6e68 | 36f83d326489c28841d2e197eb840579bc64411f | refs/heads/main | 2023-03-04T21:31:56.407760 | 2021-02-17T10:26:51 | 2021-02-17T10:26:51 | 327,277,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | def Calc(x,y,opr):
return eval(x+opr+y)
# Interactive driver: both operands and the operator are read as raw strings
# and combined by Calc() via eval().
# NOTE(review): eval of unvalidated user input is a code-injection risk --
# whitelist the operator and check that both operands are numeric.
a = (input("Enter first Number : "))
b =(input("Enter second Number : "))
choice = (input("enter operation you wanna perform : "))
res = Calc(a,b,choice)
print(res)
| [
"noreply@github.com"
] | Sahil4UI.noreply@github.com |
ad2f3075e13d53f508a2cae75716305a2f71c736 | 9e5452e9a8079125d2f89aedca7ca5b675171fee | /src/cargos/edible_oil.py | 811d51f8f88240b59b1455c6d220d6ae97149448 | [] | no_license | RadarCZ/firs | c16f8b2faf3c770c873bab948adc0bd850156dd5 | da1d614c0a92b91978ff212015ed9d00c9f37607 | refs/heads/master | 2023-08-13T09:05:32.939857 | 2021-09-24T18:10:28 | 2021-09-24T18:10:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | from cargo import Cargo
cargo = Cargo(
id="edible_oil",
type_name="string(STR_CARGO_NAME_EDIBLE_OIL)",
unit_name="string(STR_CARGO_NAME_EDIBLE_OIL)",
type_abbreviation="string(STR_CID_EDIBLE_OIL)",
sprite="NEW_CARGO_SPRITE",
weight="1.0",
is_freight="1",
cargo_classes="bitmask(CC_PIECE_GOODS, CC_LIQUID)",
cargo_label="EOIL",
# apart from TOWNGROWTH_PASSENGERS and TOWNGROWTH_MAIL, FIRS does not set any town growth effects; this has the intended effect of disabling food / water requirements for towns in desert and above snowline
town_growth_effect="TOWNGROWTH_NONE",
town_growth_multiplier="1.0",
units_of_cargo="TTD_STR_LITERS",
items_of_cargo="string(STR_CARGO_UNIT_EDIBLE_OIL)",
penalty_lowerbound="20",
single_penalty_length="128",
price_factor=116,
capacity_multiplier="1",
icon_indices=(0, 3),
)
| [
"andy@teamrubber.com"
] | andy@teamrubber.com |
635d68f5ec22d6be70e09af9a059a4673c87a2b6 | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC001~ABC099/ABC062/a.py | b0e83e1c338410207e43b30673b420f1ce8d2f2d | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | x, y = map(int, input().split())
# Month-group lookup (index = month number, index 0 is a -1 sentinel):
# group 0 = 31-day months, group 1 = 30-day months, group 2 = February.
g = [-1, 0, 2, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0]
ans = 'No'
# x and y belong to the same group exactly when their table entries match.
if g[x] == g[y]:
    ans = 'Yes'
print(ans)
| [
"ymdysk911@gmail.com"
] | ymdysk911@gmail.com |
266edd864291ce9f0e8dd6c6dbe2b32c284924c9 | fcc63d65284593a9ad45e28dd8c49445aa4a8d30 | /manage.py | 08f78aa46edb9f464700090ac1a4214dc028624c | [] | no_license | Hardworking-tester/API_SAMPLE | 0b33a2ee52e4d316775a09c9c897275b26e027c9 | 867f0b289a01fea72081fd74fbf24b2edcfe1d2d | refs/heads/master | 2021-01-23T12:32:36.585842 | 2017-06-23T02:31:39 | 2017-06-23T02:31:39 | 93,167,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # encoding:utf-8
# author:wwg
from app import create_app,db
from flask_script import Manager, Shell,Server
from app.models import FunctionModelsDb
from flask_migrate import Migrate,MigrateCommand
app = create_app()
manager = Manager(app)
manager.add_command("runserver", Server(use_debugger=True))
# migrate=Migrate(app,db)
# def make_shell_context():
# return dict(app=app, db=db, FunctionModels=FunctionModels, CaseInformation=CaseInformation)
# manager.add_command("shell", Shell(make_context=make_shell_context))
# manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| [
"373391120@qq.com"
] | 373391120@qq.com |
86cfcd2cb4cf1f59e0ea551e9420e1a9e389eaf1 | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/student/signals/receivers.py | 6af93ee1068cae5f32c12f1ee11ca6af02f5bfa3 | [
"MIT",
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 3,432 | py | """
Signal receivers for the "student" application.
"""
# pylint: disable=unused-argument
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import IntegrityError
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from edx_name_affirmation.signals import VERIFIED_NAME_APPROVED
from lms.djangoapps.courseware.toggles import courseware_mfe_progress_milestones_are_active
from common.djangoapps.student.helpers import EMAIL_EXISTS_MSG_FMT, USERNAME_EXISTS_MSG_FMT, AccountValidationError
from common.djangoapps.student.models import (
CourseEnrollment,
CourseEnrollmentCelebration,
PendingNameChange,
is_email_retired,
is_username_retired
)
from common.djangoapps.student.models_api import confirm_name_change
@receiver(pre_save, sender=get_user_model())
def on_user_updated(sender, instance, **kwargs):
    """
    Pre-save handler on the User model that blocks creation of accounts
    whose username or email collides with a retired account (or mimics the
    retired-username prefix).  Raises AccountValidationError on collision.
    """
    # Check only at User creation time and when not raw.
    if not instance.id and not kwargs['raw']:
        prefix_to_check = getattr(settings, 'RETIRED_USERNAME_PREFIX', None)
        if prefix_to_check:
            # Check for username that's too close to retired username format.
            if instance.username.startswith(prefix_to_check):
                raise AccountValidationError(
                    USERNAME_EXISTS_MSG_FMT.format(username=instance.username),
                    field="username"
                )
        # Check for a retired username.
        if is_username_retired(instance.username):
            raise AccountValidationError(
                USERNAME_EXISTS_MSG_FMT.format(username=instance.username),
                field="username"
            )
        # Check for a retired email.
        if is_email_retired(instance.email):
            raise AccountValidationError(
                EMAIL_EXISTS_MSG_FMT.format(email=instance.email),
                field="email"
            )
@receiver(post_save, sender=CourseEnrollment)
def create_course_enrollment_celebration(sender, instance, created, **kwargs):
    """
    Creates celebration rows when enrollments are created
    This is how we distinguish between new enrollments that we want to celebrate and old ones
    that existed before we introduced a given celebration.
    """
    # post_save fires on updates too; only brand-new enrollments qualify.
    if not created:
        return
    # The UI for celebrations is only supported on the MFE right now, so don't turn on
    # celebrations unless this enrollment's course is MFE-enabled and has milestones enabled.
    if not courseware_mfe_progress_milestones_are_active(instance.course_id):
        return
    try:
        CourseEnrollmentCelebration.objects.create(
            enrollment=instance,
            celebrate_first_section=True,
        )
    except IntegrityError:
        # A celebration object was already created. Shouldn't happen, but ignore it if it does.
        pass
@receiver(VERIFIED_NAME_APPROVED)
def listen_for_verified_name_approved(sender, user_id, profile_name, **kwargs):
    """
    If the user has a pending name change that corresponds to an approved verified name, confirm it.
    """
    user = get_user_model().objects.get(id=user_id)
    try:
        pending_name_change = PendingNameChange.objects.get(user=user, new_name=profile_name)
        confirm_name_change(user, pending_name_change)
    except PendingNameChange.DoesNotExist:
        # No matching pending change: the approval needs no follow-up here.
        pass
| [
"rafael.luque@osoco.es"
] | rafael.luque@osoco.es |
0b9fc00ced102d9e7d221b9e3f42f3e11458e8d5 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/GluGluToHToTauTau_M-95_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0_1377467473/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_9/run_cfg.py | c4c5df9677f7f4e810c52603360e671cef3b0002 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/GluGluToHToTauTau_M-95_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0_1377467473/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-95_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_46_1_8HB.root',
'/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-95_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_47_1_hDi.root',
'/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-95_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_48_1_dzl.root',
'/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-95_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_49_1_0TO.root',
'/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-95_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_4_1_ork.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
3c620fa7634959754cb7c5f09e025d6e39b6b3dd | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/PYTHON_PRAC/learn-python3/oop_advance/orm.py | f59b23a9e893ff71ab7c6301d8df304de7161bfc | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 2,262 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
" Simple ORM using metaclass "
class Field(object):
    """Descriptor for one mapped column: its name and its SQL column type."""

    def __init__(self, name, column_type):
        self.name = name
        self.column_type = column_type

    def __str__(self):
        # Rendered as <SubclassName:column_name>, e.g. <StringField:email>.
        return "<%s:%s>" % (type(self).__name__, self.name)
class StringField(Field):
    """Column mapped to ``varchar(100)``."""

    def __init__(self, name):
        super().__init__(name, "varchar(100)")
class IntegerField(Field):
    """Column mapped to ``bigint``."""

    def __init__(self, name):
        super().__init__(name, "bigint")
class ModelMetaclass(type):
    """Collects Field declarations from a model class body into __mappings__."""
    def __new__(cls, name, bases, attrs):
        # The abstract base class itself gets no mapping machinery.
        if name == "Model":
            return type.__new__(cls, name, bases, attrs)
        print("Found model: %s" % name)
        mappings = dict()
        for k, v in attrs.items():
            if isinstance(v, Field):
                print("Found mapping: %s ==> %s" % (k, v))
                mappings[k] = v
        # Remove the Field class attributes so instance lookups fall through
        # to Model.__getattr__ (i.e. the underlying dict).
        for k in mappings.keys():
            attrs.pop(k)
        attrs["__mappings__"] = mappings  # save the attribute-to-column mapping
        attrs["__table__"] = name  # assume the table name equals the class name
        return type.__new__(cls, name, bases, attrs)
class Model(dict, metaclass=ModelMetaclass):
    """Base class for ORM models: a dict whose keys double as attributes."""

    def __init__(self, **kw):
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        # Fall back to dict lookup so ``obj.attr`` mirrors ``obj['attr']``.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Model' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        self[key] = value

    def save(self):
        """Print the INSERT statement and argument list for this instance."""
        columns = []
        placeholders = []
        values = []
        for attr, field in self.__mappings__.items():
            columns.append(field.name)
            placeholders.append("?")
            values.append(getattr(self, attr, None))
        sql = "insert into %s (%s) values (%s)" % (
            self.__table__,
            ",".join(columns),
            ",".join(placeholders),
        )
        print("SQL: %s" % sql)
        print("ARGS: %s" % str(values))
# testing code:
class User(Model):
    # Field declarations are harvested by ModelMetaclass into __mappings__
    # and removed from the class body at class-creation time.
    id = IntegerField("id")
    name = StringField("username")
    email = StringField("email")
    password = StringField("password")
# Demonstration: constructing a model and printing its INSERT statement.
u = User(id=12345, name="Michael", email="test@orm.org", password="my-pwd")
u.save()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
df3e9efc8ee499934b9e1b00f6d263afcf93c57b | 3f2d1c68d07dd6677bc19c559b1960ca5fef6346 | /knn/get_data.py | f8f4a916bc28894a39badcdf63732a71756173c4 | [] | no_license | 213584adghj/ml | 6ffcf732377dabda129990e3a89468e18dd2700c | f73080e13c4a1c6babe0229bdb939eb3a7f988b6 | refs/heads/master | 2021-03-13T23:22:41.981534 | 2020-03-12T01:59:21 | 2020-03-12T01:59:21 | 246,720,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | # -*- coding: utf-8 -*-
import numpy as np
import sys
from sklearn.model_selection import train_test_split
sys.path.append('...')
from conf import knn as kn
import os
import re
class data(object):
    """Loads the raw KNN data set, splits it into train/test and saves both.

    All paths are derived from the project root (the working directory with
    a trailing ``src``/``knn`` path suffix removed) plus the locations
    configured in ``conf.knn.CONFIG['data']``.
    """
    def __init__(self):
        self.base_data_path = self.get_path()
        self.all_data_x, self.all_data_y = self.get_all_data()
        self.x_train, self.x_test, self.y_train, self.y_test = self.split_all_data()
        self.save_train_path()
        self.save_test_path()
    @staticmethod
    def _project_root():
        # BUGFIX: the original used str.rstrip('\\src\\knn'), which strips a
        # *character set* (backslash, s, r, c, k, n) from the end of the path
        # rather than the suffix string, and could eat trailing letters of
        # the parent directory name.  Remove the exact suffix instead.
        path = os.getcwd()
        suffix = '\\src\\knn'
        if path.endswith(suffix):
            path = path[:-len(suffix)]
        return path
    def get_path(self):
        # Absolute path of the raw data file.
        return self._project_root() + kn.CONFIG['data']['base_data_file']
    def get_all_data(self):
        """Parse the tab-separated data file into (features, labels) arrays."""
        dataMat = []
        labelMat = []
        # Close the file deterministically (the original leaked the handle).
        with open(self.base_data_path, 'r') as fr:
            for line in fr:
                curLine = line.strip().split('\t')
                dataMat.append(np.array(curLine[0:len(curLine) - 1], dtype=float))
                # The label may carry stray non-digit characters; keep digits only.
                labelMat.append(int(re.sub(r"\D", "", curLine[-1])))
        return np.array(dataMat), np.array(labelMat, dtype=int)
    def split_all_data(self):
        """Deterministically split into train/test per the configured ratio."""
        test_size = 1 - kn.CONFIG['data']['parameter']['characteristic_amount']
        x_train, x_test, y_train, y_test = train_test_split(
            self.all_data_x, self.all_data_y, test_size=test_size, random_state=0)
        return x_train, x_test, y_train, y_test
    def save_train_path(self):
        """Write the training split to the configured x/y text files."""
        path = self._project_root()
        p = path + kn.CONFIG['data']['train_data_path']['x']
        q = path + kn.CONFIG['data']['train_data_path']['y']
        np.savetxt(p, self.x_train, fmt='%s', newline='\n')
        np.savetxt(q, self.y_train, fmt='%s', newline='\n')
    def save_test_path(self):
        """Write the test split to the configured x/y text files."""
        path = self._project_root()
        p = path + kn.CONFIG['data']['test_data_path']['x']
        q = path + kn.CONFIG['data']['test_data_path']['y']
        np.savetxt(p, self.x_test, fmt='%s', newline='\n')
np.savetxt(q, self.y_test, fmt='%s', newline='\n') | [
"you@example.com"
] | you@example.com |
3d74f3713ebc51593b0995dfa896d8ac0e3b2557 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_348/ch120_2020_03_28_13_43_27_150252.py | 8ca957e5746b650c5766458db3710c75e345506e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | from random import randint
dinheiro = 100
print(dinheiro)
jogo = True
a = randint(1,36)
while jogo:
valor = float(input('aposte um valor:'))
while valor > 0:
aposta = input('numero ou paridade:')
if aposta == 'n':
numero = int(input('Escolha um numero:'))
if numero == a:
dinheiro = dinheiro + 35*valor
else:
dinheiro = dinheiro - 10
elif aposta == 'p':
escolha = input('c ou i:')
if a%2 == 0 and escolha == 'p':
dinheiro = dinheiro + valor
elif a%2 != 0 and escolha == 'i':
dinheiro = dinheiro + valor
else:
dinheiro = dinheiro - valor
else:
jogo = False
print(dinheiro) | [
"you@example.com"
] | you@example.com |
b801eba554c6416ecd4abdfc6ece4a5c3fb09be3 | aeee9575513324e329331b7cd1b28d157c297330 | /server.py | 0dcfd2b4567febb2fddb04507b0cb96cca101069 | [] | no_license | martolini/ktntcpchat | 0e80f0d00fc8d3c6c23cd9086c56622be5c58b58 | 887b060c94877bd773c2b1566d7a801632c21a56 | refs/heads/master | 2020-12-24T13:27:55.609069 | 2014-03-19T20:07:32 | 2014-03-19T20:07:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,111 | py | '''
KTN-project 2013 / 2014
Very simple server implementation that should serve as a basis
for implementing the chat server
'''
import SocketServer, json
import re
from threading import Lock
from datetime import datetime
'''
The RequestHandler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
'''
class ClientHandler(SocketServer.BaseRequestHandler):
def __init__(self, *args, **kwargs):
SocketServer.BaseRequestHandler.__init__(self, *args, **kwargs)
self.username = None
self.loggedin = False
def username_is_valid(self):
return re.match('^[0-9a-zA-Z_]+$', self.username)
def username_is_taken(self):
return self.username in self.server.connections.keys()
def handle_login(self, data):
self.username = data['username']
if self.username_is_valid() and not self.username_is_taken():
self.server.connections[self.username] = self.connection
self.connection.sendall(json.dumps({'response': 'login', 'username': self.username, 'messages': self.server.messages}))
self.loggedin = True
return
if not self.username_is_valid():
error = 'Invalid username!'
elif self.username_is_taken():
error = 'Name already taken!'
self.connection.sendall(json.dumps({'response': 'login', 'username': self.username, 'error': error}))
def handle(self):
# Get a reference to the socket object
self.connection = self.request
# Get the remote ip adress of the socket
self.ip = self.client_address[0]
# Get the remote port number of the socket
self.port = self.client_address[1]
print 'Client connected @' + self.ip + ':' + str(self.port)
while True:
data = self.connection.recv(1024).strip()
if data:
data = json.loads(data)
if data['request'] == 'login':
self.handle_login(data)
elif data['request'] == 'logout':
message = {'response': 'logout', 'username': self.username}
if not self.loggedin:
message['error'] = "Not logged in!"
self.connection.sendall(json.dumps(message))
del self.server.connections[self.username]
break
elif data['request'] == 'message':
if not self.loggedin:
message = json.dumps({'response': 'message', 'error': 'You are not logged in!'})
self.connection.sendall(message)
else:
message = json.dumps({'response': 'message', 'message': "<%s> said @ %s: %s" % (self.username, datetime.now().strftime("%H:%M"), data['message'])})
self.server.messages.append(message)
for conn in self.server.connections.values():
conn.sendall(message)
else:
print 'WHAAAAAT'
else:
print 'Connection with %s lost' % self.ip
del self.server.connections[self.username]
break
# Check if the data exists
# (recv could have returned due to a disconnect)
'''
This will make all Request handlers being called in its own thread.
Very important, otherwise only one client will be served at a time
'''
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, *args, **kwargs):
SocketServer.TCPServer.__init__(self, *args, **kwargs)
self.connections = {}
self.messages = []
if __name__ == "__main__":
HOST = 'localhost'
PORT = 9999
# Create the server, binding to localhost on port 9999
server = ThreadedTCPServer((HOST, PORT), ClientHandler)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
| [
"msroed@gmail.com"
] | msroed@gmail.com |
6ee1be19681e1d0b0a92b6188c6b7d23f9e185c6 | f538e3974b8d9718a3cd24c1dea77023789c9315 | /DjangoUbuntu/images_env/images/home/urls.py | 779918c3280b79f78139b92224f69383058c32fe | [] | no_license | doremonkinhcan87/BlogImage | de1eab86505befb595844ed15168d1eb7d352121 | c25dbe8c0a54c3294d3c8353cc9baf0a748a3707 | refs/heads/master | 2016-08-11T10:18:19.654850 | 2016-01-27T09:07:13 | 2016-01-27T09:07:13 | 49,034,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from django.conf.urls import url
from django.contrib import admin
from home import views as home_views
urlpatterns = [
url(
r'^$',
home_views.index,
name='index'),
] | [
"dautienthuy@gmail.com"
] | dautienthuy@gmail.com |
e4ca26f30e0569af1913ec828b403f78fcf1dc99 | 01196cb36e60d2f4e0bd04fb8a230c82512c6d1d | /EmployeeManagmentSystem/poll/models.py | 8bc144c4f484787a486066cd9c5fed83d93a8363 | [] | no_license | dipayandutta/django | ba69b7834bd95d4564e44155504f770b780f3ebd | 3139a7b720911a4f876fb6ef9d086c39f83e3762 | refs/heads/master | 2022-04-27T18:56:18.129939 | 2022-03-23T14:05:22 | 2022-03-23T14:05:22 | 132,355,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Question(models.Model):
title = models.TextField(null=True,blank=True)
status = models.CharField(default='inactive',max_length=10)
created_by = models.ForeignKey(User,null=True,blank=True,on_delete=models.CASCADE)
create_at = models.DateTimeField(null=True,blank=True)
updated_at = models.DateTimeField(null=True,blank=True)
def __str__(self):
return self.title
class Choice(models.Model):
question = models.ForeignKey('poll.Question',on_delete=models.CASCADE)
text = models.TextField(null=True,blank=True)
create_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.text
| [
"inbox.dipayan@gmail.com"
] | inbox.dipayan@gmail.com |
261ecd163dcc1512445518a9868ef7263a49e1b9 | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/lib2to3/fixes/fix_xreadlines.py | 39a696f2250018e3185cba4d70fb28000233131b | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,002 | py | # 2016.08.04 20:00:20 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/lib2to3/fixes/fix_xreadlines.py
"""Fix "for x in f.xreadlines()" -> "for x in f".
This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
from .. import fixer_base
from ..fixer_util import Name
class FixXreadlines(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "\n power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >\n |\n power< any+ trailer< '.' no_call='xreadlines' > >\n "
def transform(self, node, results):
no_call = results.get('no_call')
if no_call:
no_call.replace(Name(u'__iter__', prefix=no_call.prefix))
else:
node.replace([ x.clone() for x in results['call'] ])
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\lib2to3\fixes\fix_xreadlines.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 20:00:20 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
a6f5c05679a178711e1a3abdd53c281dcbd83546 | ffa651d0a81ce5629ab760fbd18d4e18f2b3f3ed | /venv/lib/python3.9/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 8f71ebfa7e095f19288caa7d380f8abd8badb31b | [] | no_license | superew/TestUI-Setel | b02d4c18a3301b57eaecb7ff60ccce7b9e1e7813 | 5cd48475fc6622ed289d15c109428f55d3d5d6f6 | refs/heads/master | 2023-08-02T11:16:34.646927 | 2021-09-30T17:21:43 | 2021-09-30T17:21:43 | 410,351,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | from __future__ import absolute_import, division, unicode_literals
import re
from . import base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(base.Filter):
"""Collapses whitespace except in pre, textarea, and script elements"""
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" \
and (preserve or token["name"] in self.spacePreserveElements):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
# Test on token["data"] above to not introduce spaces where there were not
token["data"] = " "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(' ', text)
| [
"blackan.andrew@gmail.com"
] | blackan.andrew@gmail.com |
51007ea1a328bbe0321cff96a9091326b6171efb | 9d268f0cedc3089dd95b6b22cc1a0191f43dac00 | /basket/admin.py | 1024e6ec91575d300e8585758be8b9fcef2c5bd2 | [] | no_license | Dmitry-Kiselev/store | a5c5b3ade4baa8fa7b600e10feeae352e318340a | 193788ac5c00e699863c0194661085e7be08bdf7 | refs/heads/master | 2022-12-13T19:12:39.553834 | 2017-06-15T11:38:14 | 2017-06-15T11:38:14 | 91,546,511 | 0 | 0 | null | 2022-12-07T23:57:51 | 2017-05-17T07:28:09 | Python | UTF-8 | Python | false | false | 344 | py | from django.contrib import admin
from .models import Basket, Line
class LineInlineAdmin(admin.TabularInline):
model = Line
@admin.register(Line)
class LineAdmin(admin.ModelAdmin):
list_display = ['product']
@admin.register(Basket)
class BasketAdmin(admin.ModelAdmin):
list_display = ['user']
inlines = [LineInlineAdmin, ]
| [
"kdem27@gmail.com"
] | kdem27@gmail.com |
26fca0035e4cf8bea3c2580d50de7c80c409c23f | 83cbf14b6806460daf4c556e1d8c49d9a3e8050e | /ration/wsgi.py | d1205c887feeb57cc186a485e323045aebf245b8 | [] | no_license | pauloendoh/old-ration | e0d853a22adbbb94890b1172e69c5ce8f336b6b0 | 2d07ee5d546e0f3b94c8e562c4e3af98d58579d0 | refs/heads/master | 2021-09-07T01:37:32.012653 | 2018-02-15T06:42:22 | 2018-02-15T06:42:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for ration project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ration.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"paulo.endoh@gmail.com"
] | paulo.endoh@gmail.com |
305dc36192283483c72fd39e44335055375b1dc7 | b91578b96ffe63639d3efc70d4737b92091cd0b1 | /backend/unpp_api/apps/common/management/commands/clean_commonfile_orphans.py | 29cbb451ddc386beafc2e0e3e0cea4ed46135ef6 | [
"Apache-2.0"
] | permissive | unicef/un-partner-portal | 876b6ec394909ed2f72777493623413e9cecbfdc | 73afa193a5f6d626928cae0025c72a17f0ef8f61 | refs/heads/develop | 2023-02-06T21:08:22.037975 | 2019-05-20T07:35:29 | 2019-05-20T07:35:29 | 96,332,233 | 6 | 1 | Apache-2.0 | 2023-01-25T23:21:41 | 2017-07-05T15:07:44 | JavaScript | UTF-8 | Python | false | false | 1,077 | py | from __future__ import absolute_import
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand
from common.models import CommonFile
class Command(BaseCommand):
help = 'Cleans up files that have no existing references.'
def add_arguments(self, parser):
parser.add_argument(
'--all',
action='store_true',
dest='all',
default=False,
help='Do not exclude recent files'
)
def handle(self, *args, **options):
common_files = CommonFile.objects.all()
if not options.get('all'):
common_files = common_files.filter(created_lte=datetime.now() - relativedelta(weeks=1))
self.stdout.write('Start checking current files')
cf: CommonFile
for cf in common_files.iterator():
if not cf.has_existing_reference:
self.stdout.write(f'{cf} has no references, removing...')
cf.delete()
self.stdout.write('Finish files scan')
| [
"maciej.jaworski@tivix.com"
] | maciej.jaworski@tivix.com |
d9d16a5349fe9049d369f393d45cbbbbba29aa22 | e9ef3cd143478660d098668a10e67544a42b5878 | /Lib/corpuscrawler/crawl_nhw.py | 13bb6f4b883306e83b163a53dfe669241a0069f9 | [
"Apache-2.0"
] | permissive | google/corpuscrawler | a5c790c19b26e6397b768ce26cf12bbcb641eb90 | 10adaecf4ed5a7d0557c8e692c186023746eb001 | refs/heads/master | 2023-08-26T04:15:59.036883 | 2022-04-20T08:18:11 | 2022-04-20T08:18:11 | 102,909,145 | 119 | 40 | NOASSERTION | 2022-04-20T08:18:12 | 2017-09-08T22:21:03 | Python | UTF-8 | Python | false | false | 809 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='nhw')
crawl_bibleis(crawler, out, bible='NHWTBL')
| [
"sascha@brawer.ch"
] | sascha@brawer.ch |
b600795d48e3d62be0154d5ee0d1f86cef382183 | 506cb452b371218df26fac6a1b41c46d19ce83fd | /integer_reverse.py | fbbbe7b8011994a47699d09eb49c54cddc44a97c | [] | no_license | shhhhhigne/guessing-game | 135cfcaaccad8aa6ba9e1267007d3222eab3bec8 | 5e9b137b8717061990d4b320bff4b630b3da103c | refs/heads/master | 2021-04-29T02:48:32.459926 | 2017-01-04T20:56:05 | 2017-01-04T20:56:05 | 78,052,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | def reverse_integer():
user_input = int(raw_input("Input number: "))
while user_input is not 0:
n = user_input % 10
print n
user_input = (user_input - n)/10
#print user_input
reverse_integer()
| [
"no-reply@hackbrightacademy.com"
] | no-reply@hackbrightacademy.com |
5c5b9d46206ac9713596e8e8bf604697810ef3e7 | e9fe65ef3aa2d2d0c5e676fee8ac21acd2b82218 | /nestedList_3_3.py | 1ca2fa4c5e6b7ef1263562d3342caa61b9f5b55d | [] | no_license | MayowaFunmi/Algorithm-Problem-Solutions | 5d3f54199fa381ca778bf8e932fdf599d5a42a77 | 77124d0212c1c8a09af64b445d9b1207444710b9 | refs/heads/master | 2022-12-27T22:07:10.883196 | 2020-10-11T00:34:12 | 2020-10-11T00:34:12 | 303,010,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | list = [["yes", 5.87], ["me", 2.29], ["them", 4.55], ["him", 0.34], ["she", 2.29], ["our", 4.55]]
score = []
for i in list:
score.append(i[1])
score.sort()
print(score)
names = []
for name, mark in list:
if mark == score[-2]:
names.append(name)
names.sort()
for i in names:
print(i) | [
"akinade.mayowa@gmail.com"
] | akinade.mayowa@gmail.com |
a02e4f7a7ac588561c425e85fede489e9ac79fa5 | af93b3909f86ab2d310a8fa81c9357d87fdd8a64 | /begginer/8.cas/zadatak6.py | b1aa0bb49f735766be9270a393eccc4fb04ecf1b | [] | no_license | BiljanaPavlovic/pajton-kurs | 8cf15d443c9cca38f627e44d764106ef0cc5cd98 | 93092e6e945b33116ca65796570462edccfcbcb0 | refs/heads/master | 2021-05-24T14:09:57.536994 | 2020-08-02T15:00:12 | 2020-08-02T15:00:12 | 253,597,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | proizvod=1
for i in range(4,51):
if i%6==0:
proizvod=proizvod*i
#proizvod*=i
print(f"proizvod je {proizvod}") | [
"zabiljanupavlovic@gmail.com"
] | zabiljanupavlovic@gmail.com |
627bba94d0d7c441615c4d735a19a9d7bf5af2a5 | 35250c1ccc3a1e2ef160f1dab088c9abe0381f9f | /2020/0412/1032.py | ad26688eeefed21f33aea4e895b2f8c31e30e300 | [] | no_license | entrekid/daily_algorithm | 838ab50bd35c1bb5efd8848b9696c848473f17ad | a6df9784cec95148b6c91d804600c4ed75f33f3e | refs/heads/master | 2023-02-07T11:21:58.816085 | 2021-01-02T17:58:38 | 2021-01-02T17:58:38 | 252,633,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | import sys
input = sys.stdin.readline
N = int(input())
all = [input().rstrip() for _ in range(N)]
ans = all[0]
length = len(ans)
ret = ""
for index in range(length):
base = ans[index]
for jter in range(1, N):
if all[jter][index] == base:
continue
else:
ret += "?"
break
else:
ret += base
print(ret)
| [
"dat.sci.seol@gmail.com"
] | dat.sci.seol@gmail.com |
b26aa47f86d46143f9e4617cf1f3a9cd1d1ee085 | 6cecdc007a3aafe0c0d0160053811a1197aca519 | /apps/hq/urls.py | dfd7a56494e809ecc98888792013d8fb67b130fe | [] | no_license | commtrack/temp-aquatest | 91d678c927cc4b2dce6f709afe7faf2768b58157 | 3b10d179552b1e9d6a0e4ad5e91a92a05dba19c7 | refs/heads/master | 2016-08-04T18:06:47.582196 | 2010-09-29T13:20:13 | 2010-09-29T13:20:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | from django.conf.urls.defaults import *
import hq.views as views
import settings
urlpatterns = patterns('',
url(r'^$', 'hq.views.dashboard', name="homepage"),
(r'^serverup.txt$', 'hq.views.server_up'),
(r'^change_password/?$', 'hq.views.password_change'),
(r'^no_permissions/?$', 'hq.views.no_permissions'),
url(r'^reporters/add/?$', views.add_reporter, name="add-reporter"),
url(r'^reporters/(?P<pk>\d+)/?$', views.edit_reporter, name="view-reporter"),
(r'^stats/?$', 'hq.views.reporter_stats'),
(r'', include('hq.reporter.api_.urls')),
)
| [
"allen.machary@gmail.com"
] | allen.machary@gmail.com |
e722b56c0586dbb8980d79f634b53921923bc0e4 | ad6c519f356c0c49eb004084b12b5f08e3cd2e9e | /contrib/compile_less.py | 0f21f7dd011811897c55b5aeecc5ff29cd6b0aaa | [
"MIT"
] | permissive | csilvers/kake | 1a773e7c2232ea243be256bb5e6bd92e0189db9d | 51465b12d267a629dd61778918d83a2a134ec3b2 | refs/heads/master | 2021-05-05T23:07:40.425063 | 2019-01-23T23:35:48 | 2019-01-23T23:35:48 | 116,594,798 | 0 | 0 | MIT | 2019-01-23T23:19:17 | 2018-01-07T19:59:09 | Python | UTF-8 | Python | false | false | 2,249 | py | # TODO(colin): fix these lint errors (http://pep8.readthedocs.io/en/release-1.7.x/intro.html#error-codes)
# pep8-disable:E131
"""A Compile object (see compile_rule.py): foo.less -> foo.less.css."""
from __future__ import absolute_import
import json
from kake.lib import compile_rule
from kake.lib import computed_inputs
_LESS_COMPILATION_FAILURE_RESPONSE = """
body * {
display: none !important;
}
body {
background: #bbb !important;
margin: 20px !important;
color: #900 !important;
font-family: Menlo, Consolas, Monaco, monospace !important;
font-weight: bold !important;
white-space: pre !important;
}
body:before {
content: %s
}
"""
class CompileLess(compile_rule.CompileBase):
def version(self):
"""Update every time build() changes in a way that affects output."""
return 3
def build(self, outfile_name, infile_names, _, context):
# As the lone other_input, the lessc compiler is the last infile.
(retcode, stdout, stderr) = self.try_call_with_output(
[self.abspath(infile_names[-1]),
'--no-color',
'--source-map', # writes to <outfile>.map
'--source-map-rootpath=/',
'--source-map-basepath=%s' % self.abspath(''),
self.abspath(infile_names[0]),
self.abspath(outfile_name)])
if retcode:
message = 'Compiling Less file %s failed:\n%s\n' % (
infile_names[0], stderr)
raise compile_rule.GracefulCompileFailure(
message,
_LESS_COMPILATION_FAILURE_RESPONSE %
# Use \A instead of \n in CSS strings:
# http://stackoverflow.com/a/9063069
json.dumps(message).replace("\\n", " \\A "))
# Less files have an include-structure, which means that whenever an
# included file changes, we need to rebuild. Hence we need to use a
# computed input.
compile_rule.register_compile(
'COMPILED LESS',
'genfiles/compiled_less/en/{{path}}.less.css',
computed_inputs.ComputedIncludeInputs(
'{{path}}.less',
r'^@import\s*"([^"]*)"',
other_inputs=['genfiles/node_modules/.bin/lessc']),
CompileLess())
| [
"csilvers@khanacademy.org"
] | csilvers@khanacademy.org |
3477905774ed5edf89834341f37552f9d7ae3118 | 21e27d3db70f99de096969553b689c58cd09c42b | /updatelineno_of_paleoword.py | 218e5ee0aea4cf63e9fb07e7d980e88fb6b4b706 | [] | no_license | suhailvs/django-qurantorah | e520b0030422f8a4311763628daebbbbd392c34c | 151b9abf3428654b6c490d3df392c0d163c79c6e | refs/heads/master | 2021-06-15T02:11:14.688457 | 2020-03-15T07:31:33 | 2020-03-15T07:31:33 | 159,264,131 | 0 | 0 | null | 2021-03-18T21:11:04 | 2018-11-27T02:31:00 | Python | UTF-8 | Python | false | false | 1,063 | py | import json
from torah.models import Word,Line
"""
USAGE
=====
./manage.py shell
>>> import updatelineno_of_paleoword
>>> updatelineno_of_paleoword.save_word_to_db()
"""
def save_word_to_db():
chap='genesis,exodus,leviticus,numbers,deuteronomy'
n_lines, n_word, n_letters= 0,0,0
for title in chap.split(','):
paleo_data = json.loads(open('torah/json/paleo/%s.json'%title).read())
for i,chapter in enumerate(paleo_data['text']):
# data_word = []
# data_line = []
n_lines+=len(chapter)
for j,line in enumerate(chapter):
n_word+=len(line.split(' '))
for word in line.split(' '):
w, created = Word.objects.get_or_create(name = word)
l = Line.objects.get(title = title, chapter = i+1, line = j+1)
w.lines.add(l)
#if not Word.objects.filter(name=word):
#data_word.append(Word(name=word))
n_letters+=len(word)
# data_line.append(Line(title = title, chapter = i+1, line = j+1))
# Word.objects.bulk_create(data_word)
# Line.objects.bulk_create(data_line)
print(n_lines,n_word,n_letters)
| [
"suhailvs@gmail.com"
] | suhailvs@gmail.com |
fa6be7b270c471aac3b51fad9c4218e92d997953 | 338dbd8788b019ab88f3c525cddc792dae45036b | /lib/python3.6/site-packages/statsmodels/discrete/tests/test_count_model.py | d34bc7c1d26bd2a450cb46076826e10791e365af | [] | permissive | KshitizSharmaV/Quant_Platform_Python | 9b8b8557f13a0dde2a17de0e3352de6fa9b67ce3 | d784aa0604d8de5ba5ca0c3a171e3556c0cd6b39 | refs/heads/master | 2022-12-10T11:37:19.212916 | 2019-07-09T20:05:39 | 2019-07-09T20:05:39 | 196,073,658 | 1 | 2 | BSD-3-Clause | 2022-11-27T18:30:16 | 2019-07-09T19:48:26 | Python | UTF-8 | Python | false | false | 23,297 | py | from __future__ import division
from statsmodels.compat.scipy import SP_GTE_019
import numpy as np
from numpy.testing import (assert_,
assert_equal, assert_array_equal, assert_allclose)
import pytest
import statsmodels.api as sm
from .results.results_discrete import RandHIE
from .test_discrete import CheckModelMixin
class CheckGeneric(CheckModelMixin):
    """Shared checks for zero-inflated count models.

    Subclasses must set in ``setup_class``:
      * ``res1`` -- freshly fitted results instance
      * ``res2`` -- stored reference results (from results_discrete.RandHIE)
      * ``init_keys`` / ``init_kwds`` -- expected model init keywords
    """

    def test_params(self):
        assert_allclose(self.res1.params, self.res2.params, atol=1e-5, rtol=1e-5)

    def test_llf(self):
        assert_allclose(self.res1.llf, self.res2.llf, atol=1e-5, rtol=1e-5)

    def test_conf_int(self):
        assert_allclose(self.res1.conf_int(), self.res2.conf_int, atol=1e-3, rtol=1e-5)

    def test_bse(self):
        assert_allclose(self.res1.bse, self.res2.bse, atol=1e-3, rtol=1e-3)

    def test_aic(self):
        assert_allclose(self.res1.aic, self.res2.aic, atol=1e-2, rtol=1e-2)

    def test_bic(self):
        # BUG FIX: this previously asserted aic against aic (copy/paste from
        # test_aic), so the bic of res1 was never actually checked.
        # NOTE(review): assumes the stored reference results expose ``bic``
        # with the same convention -- confirm against results_discrete.RandHIE.
        assert_allclose(self.res1.bic, self.res2.bic, atol=1e-1, rtol=1e-1)

    def test_t(self):
        # t_test against the identity matrix must reproduce res1.tvalues
        unit_matrix = np.identity(self.res1.params.size)
        t_test = self.res1.t_test(unit_matrix)
        assert_allclose(self.res1.tvalues, t_test.tvalue)

    def test_fit_regularized(self):
        # Penalize all but the last two coefficients lightly; the unpenalized
        # tail of the parameter vector should stay close to the MLE.
        model = self.res1.model

        alpha = np.ones(len(self.res1.params))
        alpha[-2:] = 0
        res_reg = model.fit_regularized(alpha=alpha*0.01, disp=0, maxiter=500)

        assert_allclose(res_reg.params[2:], self.res1.params[2:],
                        atol=5e-2, rtol=5e-2)

    def test_init_keys(self):
        init_kwds = self.res1.model._get_init_kwds()
        assert_equal(set(init_kwds.keys()), set(self.init_keys))
        for key, value in self.init_kwds.items():
            assert_equal(init_kwds[key], value)

    def test_null(self):
        # call llnull, so null model is attached, side effect of cached attribute
        self.res1.llnull
        # check model instead of value: the null model should have constant
        # (zero peak-to-peak) exog and exog_infl columns
        exog_null = self.res1.res_null.model.exog
        exog_infl_null = self.res1.res_null.model.exog_infl
        assert_array_equal(exog_infl_null.shape,
                           (len(self.res1.model.exog), 1))
        assert_equal(np.ptp(exog_null), 0)
        assert_equal(np.ptp(exog_infl_null), 0)

    @pytest.mark.smoke
    def test_summary(self):
        summ = self.res1.summary()
        # GH 4581
        assert 'Covariance Type:' in str(summ)
class TestZeroInflatedModel_logit(CheckGeneric):
    """Zero-inflated Poisson with a logit inflation model on RAND HIE data."""

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load(as_pandas=False)
        cls.endog = data.endog
        count_exog = sm.add_constant(data.exog[:, 1:4], prepend=False)
        infl_exog = sm.add_constant(data.exog[:, 0], prepend=False)

        model = sm.ZeroInflatedPoisson(data.endog, count_exog,
                                       exog_infl=infl_exog,
                                       inflation='logit')
        cls.res1 = model.fit(method='newton', maxiter=500, disp=0)
        # attach the null model so CheckGeneric.test_null can inspect it
        cls.res1._results._attach_nullmodel = True

        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset']
        cls.init_kwds = {'inflation': 'logit'}
        cls.res2 = RandHIE.zero_inflated_poisson_logit
class TestZeroInflatedModel_probit(CheckGeneric):
    """Zero-inflated Poisson with a probit inflation model on RAND HIE data."""

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load(as_pandas=False)
        cls.endog = data.endog
        count_exog = sm.add_constant(data.exog[:, 1:4], prepend=False)
        infl_exog = sm.add_constant(data.exog[:, 0], prepend=False)

        model = sm.ZeroInflatedPoisson(data.endog, count_exog,
                                       exog_infl=infl_exog,
                                       inflation='probit')
        cls.res1 = model.fit(method='newton', maxiter=500, disp=0)
        # attach the null model so CheckGeneric.test_null can inspect it
        cls.res1._results._attach_nullmodel = True

        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset']
        cls.init_kwds = {'inflation': 'probit'}
        cls.res2 = RandHIE.zero_inflated_poisson_probit
class TestZeroInflatedModel_offset(CheckGeneric):
    """Zero-inflated Poisson with an offset term.

    Also verifies that fitting with ``exposure = exp(offset)`` is
    equivalent to fitting with ``offset``.
    """

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load(as_pandas=False)
        cls.endog = data.endog
        exog = sm.add_constant(data.exog[:, 1:4], prepend=False)
        exog_infl = sm.add_constant(data.exog[:, 0], prepend=False)
        cls.res1 = sm.ZeroInflatedPoisson(data.endog, exog,
            exog_infl=exog_infl, offset=data.exog[:, 7]).fit(method='newton',
                                                             maxiter=500,
                                                             disp=0)
        # for llnull test
        cls.res1._results._attach_nullmodel = True
        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset']
        cls.init_kwds = {'inflation': 'logit'}
        res2 = RandHIE.zero_inflated_poisson_offset
        cls.res2 = res2

    def test_exposure(self):
        # This test mostly the equivalence of offset and exposure = exp(offset)
        # use data arrays from class model
        model1 = self.res1.model
        offset = model1.offset
        model3 = sm.ZeroInflatedPoisson(model1.endog, model1.exog,
            exog_infl=model1.exog_infl, exposure=np.exp(offset))
        res3 = model3.fit(start_params=self.res1.params,
                          method='newton', maxiter=500, disp=0)

        assert_allclose(res3.params, self.res1.params, atol=1e-6, rtol=1e-6)
        fitted1 = self.res1.predict()
        # BUG FIX: previously ``fitted3 = self.res1.predict()`` compared
        # res1's predictions with themselves; the point is to compare the
        # exposure-based model against the offset-based one.
        fitted3 = res3.predict()
        assert_allclose(fitted3, fitted1, atol=1e-6, rtol=1e-6)

        # explicit predict with full data should reproduce the fitted values
        ex = model1.exog
        ex_infl = model1.exog_infl
        offset = model1.offset
        fitted1_0 = self.res1.predict(exog=ex, exog_infl=ex_infl,
                                      offset=offset)
        fitted3_0 = res3.predict(exog=ex, exog_infl=ex_infl,
                                 exposure=np.exp(offset))
        assert_allclose(fitted3_0, fitted1_0, atol=1e-6, rtol=1e-6)

        # predict on a subset of observations
        ex = model1.exog[:10:2]
        ex_infl = model1.exog_infl[:10:2]
        offset = offset[:10:2]
        # # TODO: this raises with shape mismatch,
        # # i.e. uses offset or exposure from model -> fix it or not?
        # GLM.predict to setting offset and exposure to zero
        # fitted1_1 = self.res1.predict(exog=ex, exog_infl=ex_infl)
        # fitted3_1 = res3.predict(exog=ex, exog_infl=ex_infl)
        # assert_allclose(fitted3_1, fitted1_1, atol=1e-6, rtol=1e-6)

        fitted1_2 = self.res1.predict(exog=ex, exog_infl=ex_infl,
                                      offset=offset)
        fitted3_2 = res3.predict(exog=ex, exog_infl=ex_infl,
                                 exposure=np.exp(offset))
        assert_allclose(fitted3_2, fitted1_2, atol=1e-6, rtol=1e-6)
        assert_allclose(fitted1_2, fitted1[:10:2], atol=1e-6, rtol=1e-6)
        assert_allclose(fitted3_2, fitted1[:10:2], atol=1e-6, rtol=1e-6)
class TestZeroInflatedModelPandas(CheckGeneric):
    """Zero-inflated Poisson fitted from pandas inputs; checks parameter names."""

    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load_pandas()
        cls.endog = data.endog
        cls.data = data
        count_exog = sm.add_constant(data.exog.iloc[:, 1:4], prepend=False)
        infl_exog = sm.add_constant(data.exog.iloc[:, 0], prepend=False)
        # we don't need to verify convergence here
        start_params = np.asarray([0.10337834587498942, -1.0459825102508549,
                                   -0.08219794475894268, 0.00856917434709146,
                                   -0.026795737379474334, 1.4823632430107334])
        model = sm.ZeroInflatedPoisson(data.endog, count_exog,
                                       exog_infl=infl_exog, inflation='logit')
        cls.res1 = model.fit(start_params=start_params, method='newton',
                             maxiter=500, disp=0)
        # attach the null model so CheckGeneric.test_null can inspect it
        cls.res1._results._attach_nullmodel = True

        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset']
        cls.init_kwds = {'inflation': 'logit'}
        cls.res2 = RandHIE.zero_inflated_poisson_logit

    def test_names(self):
        # inflation parameters come first, then the count-model parameters;
        # names must be consistent across model, params index and bse index
        expected = ['inflate_lncoins', 'inflate_const', 'idp', 'lpi',
                    'fmde', 'const']
        assert_array_equal(self.res1.model.exog_names, expected)
        assert_array_equal(self.res1.params.index.tolist(), expected)
        assert_array_equal(self.res1.bse.index.tolist(), expected)

        # prepending the constants changes the ordering accordingly
        count_exog = sm.add_constant(self.data.exog.iloc[:, 1:4], prepend=True)
        infl_exog = sm.add_constant(self.data.exog.iloc[:, 0], prepend=True)
        expected = ['inflate_const', 'inflate_lncoins', 'const', 'idp',
                    'lpi', 'fmde']
        model = sm.ZeroInflatedPoisson(self.data.endog, count_exog,
                                       exog_infl=infl_exog, inflation='logit')
        assert_array_equal(model.exog_names, expected)
class TestZeroInflatedPoisson_predict(object):
    """Checks predictions of a ZIP model fitted to simulated data.

    The data are drawn from ``zipoisson`` with known mean parameters
    and inflation probability 0.05, so predicted mean, dispersion and
    probabilities can be compared to their sample / theoretical values.
    """

    @classmethod
    def setup_class(cls):
        expected_params = [1, 0.5]
        np.random.seed(123)
        nobs = 200
        exog = np.ones((nobs, 2))
        # first half of the sample gets a different regressor value
        exog[:nobs//2, 1] = 2
        mu_true = exog.dot(expected_params)
        cls.endog = sm.distributions.zipoisson.rvs(mu_true, 0.05,
                                                   size=mu_true.shape)
        model = sm.ZeroInflatedPoisson(cls.endog, exog)
        cls.res = model.fit(method='bfgs', maxiter=5000, maxfun=5000, disp=0)

    def test_mean(self):
        assert_allclose(self.res.predict().mean(), self.endog.mean(),
                        atol=1e-2, rtol=1e-2)

    def test_var(self):
        # predicted mean times the dispersion factor approximates the
        # sample variance of the endog
        assert_allclose((self.res.predict().mean() *
                         self.res._dispersion_factor.mean()),
                        self.endog.var(), atol=5e-2, rtol=5e-2)

    def test_predict_prob(self):
        # predicted probabilities for counts 0..6 vs the zipoisson pmf
        # evaluated at the predicted means
        res = self.res
        # removed unused local ``endog = res.model.endog``

        pr = res.predict(which='prob')
        pr2 = sm.distributions.zipoisson.pmf(np.arange(7)[:, None],
                                             res.predict(), 0.05).T
        assert_allclose(pr, pr2, rtol=0.05, atol=0.05)
@pytest.mark.slow
class TestZeroInflatedGeneralizedPoisson(CheckGeneric):
    """Regression tests for ZeroInflatedGeneralizedPoisson (p=1) on RandHIE."""
    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load(as_pandas=False)
        cls.endog = data.endog
        exog = sm.add_constant(data.exog[:,1:4], prepend=False)
        exog_infl = sm.add_constant(data.exog[:,0], prepend=False)
        cls.res1 = sm.ZeroInflatedGeneralizedPoisson(data.endog, exog,
            exog_infl=exog_infl, p=1).fit(method='newton', maxiter=500, disp=0)
        # for llnull test
        cls.res1._results._attach_nullmodel = True
        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset', 'p']
        cls.init_kwds = {'inflation': 'logit', 'p': 1}
        res2 = RandHIE.zero_inflated_generalized_poisson
        cls.res2 = res2
    def test_bse(self):
        # inherited bse comparison intentionally skipped for this model
        pass
    def test_conf_int(self):
        # inherited conf_int comparison intentionally skipped
        pass
    def test_bic(self):
        # inherited bic comparison intentionally skipped
        pass
    def test_t(self):
        # t_test against the identity matrix must reproduce tvalues
        unit_matrix = np.identity(self.res1.params.size)
        t_test = self.res1.t_test(unit_matrix)
        assert_allclose(self.res1.tvalues, t_test.tvalue)
    def test_minimize(self, reset_randomstate):
        # check additional optimizers using the `minimize` option
        model = self.res1.model
        # use the same start_params, but avoid recomputing
        start_params = self.res1.mle_settings['start_params']
        res_ncg = model.fit(start_params=start_params,
                            method='minimize', min_method="trust-ncg",
                            maxiter=500, disp=0)
        assert_allclose(res_ncg.params, self.res2.params,
                        atol=1e-3, rtol=0.04)
        assert_allclose(res_ncg.bse, self.res2.bse,
                        atol=1e-3, rtol=0.6)
        assert_(res_ncg.mle_retvals['converged'] is True)
        res_dog = model.fit(start_params=start_params,
                            method='minimize', min_method="dogleg",
                            maxiter=500, disp=0)
        assert_allclose(res_dog.params, self.res2.params,
                        atol=1e-3, rtol=3e-3)
        assert_allclose(res_dog.bse, self.res2.bse,
                        atol=1e-3, rtol=0.6)
        assert_(res_dog.mle_retvals['converged'] is True)
        # Set random_state here to improve reproducibility
        random_state = np.random.RandomState(1)
        seed = {'seed': random_state} if SP_GTE_019 else {}
        res_bh = model.fit(start_params=start_params,
                           method='basinhopping', niter=500, stepsize=0.1,
                           niter_success=None, disp=0, interval=1, **seed)
        assert_allclose(res_bh.params, self.res2.params,
                        atol=1e-4, rtol=1e-4)
        assert_allclose(res_bh.bse, self.res2.bse,
                        atol=1e-3, rtol=0.6)
        # skip, res_bh reports converged is false but params agree
        #assert_(res_bh.mle_retvals['converged'] is True)
class TestZeroInflatedGeneralizedPoisson_predict(object):
    """Prediction sanity checks for ZIGP (p=2) on simulated data."""
    @classmethod
    def setup_class(cls):
        expected_params = [1, 0.5, 0.5]
        np.random.seed(1234)
        nobs = 200
        exog = np.ones((nobs, 2))
        exog[:nobs//2, 1] = 2
        mu_true = exog.dot(expected_params[:-1])
        # rvs arguments appear to be (mu, alpha, p, w) with w the inflation
        # probability -- TODO confirm against sm.distributions.zigenpoisson
        cls.endog = sm.distributions.zigenpoisson.rvs(mu_true, expected_params[-1],
                                                      2, 0.5, size=mu_true.shape)
        model = sm.ZeroInflatedGeneralizedPoisson(cls.endog, exog, p=2)
        cls.res = model.fit(method='bfgs', maxiter=5000, maxfun=5000, disp=0)
    def test_mean(self):
        # fitted mean should match the sample mean
        assert_allclose(self.res.predict().mean(), self.endog.mean(),
                        atol=1e-4, rtol=1e-4)
    def test_var(self):
        # implied variance = mean * dispersion factor vs sample variance
        assert_allclose((self.res.predict().mean() *
                         self.res._dispersion_factor.mean()),
                        self.endog.var(), atol=0.05, rtol=0.1)
    def test_predict_prob(self):
        res = self.res
        endog = res.model.endog
        pr = res.predict(which='prob')
        # compared against the zinegbin pmf with matching parameters
        pr2 = sm.distributions.zinegbin.pmf(np.arange(12)[:,None],
                                            res.predict(), 0.5, 2, 0.5).T
        assert_allclose(pr, pr2, rtol=0.08, atol=0.05)
class TestZeroInflatedNegativeBinomialP(CheckGeneric):
    """Regression tests for ZeroInflatedNegativeBinomialP (p=2) on RandHIE."""
    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load(as_pandas=False)
        cls.endog = data.endog
        exog = sm.add_constant(data.exog[:,1], prepend=False)
        exog_infl = sm.add_constant(data.exog[:,0], prepend=False)
        # cheating for now, parameters are not well identified in this dataset
        # see https://github.com/statsmodels/statsmodels/pull/3928#issuecomment-331724022
        sp = np.array([1.88, -10.28, -0.20, 1.14, 1.34])
        cls.res1 = sm.ZeroInflatedNegativeBinomialP(data.endog, exog,
            exog_infl=exog_infl, p=2).fit(start_params=sp, method='nm',
            xtol=1e-6, maxiter=5000, disp=0)
        # for llnull test
        cls.res1._results._attach_nullmodel = True
        cls.init_keys = ['exog_infl', 'exposure', 'inflation', 'offset', 'p']
        cls.init_kwds = {'inflation': 'logit', 'p': 2}
        res2 = RandHIE.zero_inflated_negative_binomial
        cls.res2 = res2
    def test_params(self):
        # looser tolerance than the CheckGeneric default (see note above)
        assert_allclose(self.res1.params, self.res2.params,
                        atol=1e-3, rtol=1e-3)
    def test_conf_int(self):
        # inherited conf_int comparison intentionally skipped
        pass
    def test_bic(self):
        # inherited bic comparison intentionally skipped
        pass
    def test_fit_regularized(self):
        # small L1 penalty on all but the last two params should stay
        # close to the unregularized fit
        model = self.res1.model
        alpha = np.ones(len(self.res1.params))
        alpha[-2:] = 0
        res_reg = model.fit_regularized(alpha=alpha*0.01, disp=0, maxiter=500)
        assert_allclose(res_reg.params[2:], self.res1.params[2:],
                        atol=1e-1, rtol=1e-1)
    # possibly slow, adds 25 seconds
    def test_minimize(self, reset_randomstate):
        # check additional optimizers using the `minimize` option
        model = self.res1.model
        # use the same start_params, but avoid recomputing
        start_params = self.res1.mle_settings['start_params']
        res_ncg = model.fit(start_params=start_params,
                            method='minimize', min_method="trust-ncg",
                            maxiter=500, disp=0)
        assert_allclose(res_ncg.params, self.res2.params,
                        atol=1e-3, rtol=0.03)
        assert_allclose(res_ncg.bse, self.res2.bse,
                        atol=1e-3, rtol=0.06)
        assert_(res_ncg.mle_retvals['converged'] is True)
        res_dog = model.fit(start_params=start_params,
                            method='minimize', min_method="dogleg",
                            maxiter=500, disp=0)
        assert_allclose(res_dog.params, self.res2.params,
                        atol=1e-3, rtol=3e-3)
        assert_allclose(res_dog.bse, self.res2.bse,
                        atol=1e-3, rtol=7e-3)
        assert_(res_dog.mle_retvals['converged'] is True)
        res_bh = model.fit(start_params=start_params,
                           method='basinhopping', maxiter=500,
                           niter_success=3, disp=0)
        assert_allclose(res_bh.params, self.res2.params,
                        atol=1e-4, rtol=3e-4)
        assert_allclose(res_bh.bse, self.res2.bse,
                        atol=1e-3, rtol=1e-3)
        # skip, res_bh reports converged is false but params agree
        #assert_(res_bh.mle_retvals['converged'] is True)
class TestZeroInflatedNegativeBinomialP_predict(object):
    """Prediction checks for ZINB-P (p=2) against a simulated DGP with a
    known 15% inflation probability."""
    @classmethod
    def setup_class(cls):
        expected_params = [1, 1, 0.5]
        np.random.seed(987123)
        nobs = 500
        exog = np.ones((nobs, 2))
        exog[:nobs//2, 1] = 0
        prob_infl = 0.15
        mu_true = np.exp(exog.dot(expected_params[:-1]))
        # rvs arguments appear to be (mu, alpha, p, w) -- TODO confirm
        cls.endog = sm.distributions.zinegbin.rvs(mu_true,
                    expected_params[-1], 2, prob_infl, size=mu_true.shape)
        model = sm.ZeroInflatedNegativeBinomialP(cls.endog, exog, p=2)
        cls.res = model.fit(method='bfgs', maxiter=5000, maxfun=5000, disp=0)
        # attach others
        cls.prob_infl = prob_infl
    def test_mean(self):
        # fitted mean should match the sample mean
        assert_allclose(self.res.predict().mean(), self.endog.mean(),
                        rtol=0.01)
    def test_var(self):
        # todo check precision
        assert_allclose((self.res.predict().mean() *
                         self.res._dispersion_factor.mean()),
                        self.endog.var(), rtol=0.2)
    def test_predict_prob(self):
        res = self.res
        endog = res.model.endog
        pr = res.predict(which='prob')
        pr2 = sm.distributions.zinegbin.pmf(np.arange(pr.shape[1])[:,None],
                                            res.predict(), 0.5, 2, self.prob_infl).T
        assert_allclose(pr, pr2, rtol=0.1, atol=0.1)
        # the averaged predicted pmf should match the empirical frequencies
        prm = pr.mean(0)
        pr2m = pr2.mean(0)
        freq = np.bincount(endog.astype(int)) / len(endog)
        assert_allclose(((pr2m - prm)**2).mean(), 0, rtol=1e-10, atol=5e-4)
        assert_allclose(((prm - freq)**2).mean(), 0, rtol=1e-10, atol=1e-4)
    def test_predict_generic_zi(self):
        # These tests don't use numbers from other packages.
        # Tests are on closeness of estimated to true/DGP values
        # and theoretical relationship between quantities
        res = self.res
        endog = self.endog
        exog = self.res.model.exog
        prob_infl = self.prob_infl
        nobs = len(endog)
        freq = np.bincount(endog.astype(int)) / len(endog)
        probs = res.predict(which='prob')
        probsm = probs.mean(0)
        assert_allclose(freq, probsm, atol=0.02)
        # the two unique exog rows cover the two halves of the sample
        probs_unique = res.predict(exog=[[1, 0], [1, 1]],
                                   exog_infl=np.asarray([[1], [1]]),
                                   which='prob')
        # no default for exog_infl yet
        #probs_unique = res.predict(exog=[[1, 0], [1, 1]], which='prob')
        probs_unique2 = probs[[1, nobs-1]]
        assert_allclose(probs_unique, probs_unique2, atol=1e-10)
        probs0_unique = res.predict(exog=[[1, 0], [1, 1]],
                                    exog_infl=np.asarray([[1], [1]]),
                                    which='prob-zero')
        assert_allclose(probs0_unique, probs_unique2[:, 0], rtol=1e-10)
        probs_main_unique = res.predict(exog=[[1, 0], [1, 1]],
                                        exog_infl=np.asarray([[1], [1]]),
                                        which='prob-main')
        probs_main = res.predict(which='prob-main')
        # NOTE(review): the next line is a no-op expression statement
        probs_main[[0,-1]]
        assert_allclose(probs_main_unique, probs_main[[0,-1]], rtol=1e-10)
        assert_allclose(probs_main_unique, 1 - prob_infl, atol=0.01)
        pred = res.predict(exog=[[1, 0], [1, 1]],
                           exog_infl=np.asarray([[1], [1]]))
        pred1 = endog[exog[:, 1] == 0].mean(), endog[exog[:, 1] == 1].mean()
        assert_allclose(pred, pred1, rtol=0.05)
        pred_main_unique = res.predict(exog=[[1, 0], [1, 1]],
                                       exog_infl=np.asarray([[1], [1]]),
                                       which='mean-main')
        assert_allclose(pred_main_unique, np.exp(np.cumsum(res.params[1:3])),
                        rtol=1e-10)
        # TODO: why does the following fail, params are not close enough to DGP
        # but results are close statistics of simulated data
        # what is mu_true in DGP sm.distributions.zinegbin.rvs
        # assert_allclose(pred_main_unique, mu_true[[1, -1]] * (1 - prob_infl), rtol=0.01)
        # mean-nonzero
        mean_nz = (endog[(exog[:, 1] == 0) & (endog > 0)].mean(),
                   endog[(exog[:, 1] == 1) & (endog > 0)].mean())
        pred_nonzero_unique = res.predict(exog=[[1, 0], [1, 1]],
            exog_infl=np.asarray([[1], [1]]), which='mean-nonzero')
        assert_allclose(pred_nonzero_unique, mean_nz, rtol=0.05)
        pred_lin_unique = res.predict(exog=[[1, 0], [1, 1]],
                                      exog_infl=np.asarray([[1], [1]]),
                                      which='linear')
        assert_allclose(pred_lin_unique, np.cumsum(res.params[1:3]), rtol=1e-10)
class TestZeroInflatedNegativeBinomialP_predict2(object):
    """Prediction checks for ZINB-P fitted on the full RandHIE design (the
    same exog is used for both the mean and the inflation part)."""
    @classmethod
    def setup_class(cls):
        data = sm.datasets.randhie.load(as_pandas=False)
        cls.endog = data.endog
        exog = data.exog
        # warm start near the optimum so bfgs converges quickly
        start_params = np.array([
            -2.83983767, -2.31595924, -3.9263248, -4.01816431, -5.52251843,
            -2.4351714, -4.61636366, -4.17959785, -0.12960256, -0.05653484,
            -0.21206673, 0.08782572, -0.02991995, 0.22901208, 0.0620983,
            0.06809681, 0.0841814, 0.185506, 1.36527888])
        mod = sm.ZeroInflatedNegativeBinomialP(
            cls.endog, exog, exog_infl=exog, p=2)
        res = mod.fit(start_params=start_params, method="bfgs",
                      maxiter=1000, disp=0)
        cls.res = res
    def test_mean(self):
        # fitted mean should match the sample mean
        assert_allclose(self.res.predict().mean(), self.endog.mean(),
                        atol=0.02)
    def test_zero_nonzero_mean(self):
        # identity check: E[y] == P(y > 0) * E[y | y > 0]
        mean1 = self.endog.mean()
        mean2 = ((1 - self.res.predict(which='prob-zero').mean()) *
                 self.res.predict(which='mean-nonzero').mean())
        assert_allclose(mean1, mean2, atol=0.2)
| [
"kshitizsharmav@gmail.com"
] | kshitizsharmav@gmail.com |
74c8e9860eae167b136c05285e22a67e2c9a8de8 | 920b9cb23d3883dcc93b1682adfee83099fee826 | /itsm/role/utils.py | 17cfe9461e59ce1f7ab21362402eb584388b0f6c | [
"MIT",
"LGPL-2.1-or-later",
"LGPL-3.0-only"
] | permissive | TencentBlueKing/bk-itsm | f817fb166248d3059857b57d03e8b5ec1b78ff5b | 2d708bd0d869d391456e0fb8d644af3b9f031acf | refs/heads/master | 2023-08-31T23:42:32.275836 | 2023-08-22T08:17:54 | 2023-08-22T08:17:54 | 391,839,825 | 100 | 86 | MIT | 2023-09-14T08:24:54 | 2021-08-02T06:35:16 | Python | UTF-8 | Python | false | false | 1,710 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-ITSM 蓝鲸流程服务 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-ITSM 蓝鲸流程服务 is licensed under the MIT License.
License for BK-ITSM 蓝鲸流程服务:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.utils.translation import ugettext as _
def translate_constant_2(constant):
    """Translate the display labels of a Django choices-style constant.

    Args:
        constant: iterable of ``(value, label)`` pairs.

    Returns:
        list: new ``(value, translated_label)`` tuples; the input is not
        mutated.  Labels are resolved with ``ugettext`` (``_``) so the
        active language is applied.
    """
    # py2->py3 note kept from the original: labels are already ``str``
    # here, so no ``.decode()`` is needed before translating.
    return [(item[0], _(item[1])) for item in constant]
| [
"1758504262@qq.com"
] | 1758504262@qq.com |
24f7f8a2e67f6628493069a129b394a4ff11d1d4 | 2f0d56cdcc4db54f9484b3942db88d79a4215408 | /.history/Python_Learning/lesson13_20200329114830.py | ba04f40090cb55cf629dc5824f55791317a02c91 | [] | no_license | xiangxing98/xiangxing98.github.io | 8571c8ee8509c0bccbb6c2f3740494eedc53e418 | 23618666363ecc6d4acd1a8662ea366ddf2e6155 | refs/heads/master | 2021-11-17T19:00:16.347567 | 2021-11-14T08:35:01 | 2021-11-14T08:35:01 | 33,877,060 | 7 | 1 | null | 2017-07-01T16:42:49 | 2015-04-13T15:35:01 | HTML | UTF-8 | Python | false | false | 429 | py | # -*- encoding: utf-8 -*-
# !/usr/bin/env python
'''
@File : lesson13.py
@Time : 2020/03/29 11:43:27
@Author : Stone_Hou
@Version : 1.0
@Contact : xiangxing985529@163.com
@License : (C)Copyright 2010-2020, Stone_Hou
@Desc : None
'''
# here put the import lib
# Practice #01
print('He said,"I\'m yours!"')
# He said, "I'm yours!"
# Practice #02
print('\\\\_v_//')
\\_v_//
# Practice #03
# Practice #04 | [
"xiangxing985529@163.com"
] | xiangxing985529@163.com |
e1ac78e0b777110562d318e87c495e8dbe70df4d | 8096e140f0fd38b9492e0fcf307990b1a5bfc3dd | /Python/pick6/pick6.py | 25239e92262858f2921b73fe2ff362a129aeeefd | [] | no_license | perennialAutodidact/PDXCodeGuild_Projects | 0cacd44499c0bdc0c157555fe5466df6d8eb09b6 | 28a8258eba41e1fe6c135f54b230436ea7d28678 | refs/heads/master | 2022-11-15T22:26:45.775550 | 2020-07-07T17:13:01 | 2020-07-07T17:13:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | import random
def pick6():
    """Return a fresh lottery ticket: a list of 6 random ints in [1, 99].

    Numbers may repeat, matching the original draw-with-replacement logic.
    """
    # Comprehension replaces the original manual while-loop counter.
    return [random.randint(1, 99) for _ in range(6)]
def check_ticket(ticket, goal):
    """Compare a ticket to the winning numbers position by position.

    Returns a list the same length as ``ticket`` holding the number where
    it equals the winning number at that position, and 0 otherwise.
    """
    return [num if num == goal[pos] else 0
            for pos, num in enumerate(ticket)]
def collect_winnings(matches):
    """Map the number of positional matches to its dollar payout.

    ``matches`` is the list produced by ``check_ticket``: a matched
    position holds the (non-zero) winning number, a miss holds 0.
    """
    # number of matches -> dollar reward
    payouts = {0: 0, 1: 4, 2: 7, 3: 100, 4: 50000, 5: 1000000, 6: 25000000}
    hits = sum(1 for num in matches if num > 0)
    return payouts[hits]
def get_roi(investment, earnings):
    """Return the fractional return on investment:
    (earnings - investment) / investment."""
    return (earnings - investment) / investment
def main():
    """Interactive driver: simulate buying $2 Pick-6 tickets.

    Asks how many tickets to buy, draws one fixed winning ticket, plays
    each bought ticket against it, then reports the final balance, total
    winnings and return on investment.
    """
    plays = int(input("\nWelcome to Pick 6. Enter the number of times you'd like to play: "))
    winning_ticket = pick6()
    ticket_price = 2
    balance = 0
    earnings = 0
    i = 0
    while i < plays:
        balance -= ticket_price  # pay for this ticket
        ticket = pick6()
        matches = check_ticket(ticket, winning_ticket)
        reward = collect_winnings(matches)
        if reward:
            balance += reward
            earnings += reward
        i += 1
    investment = ticket_price * plays  # total spent on tickets
    # NOTE(review): entering 0 plays makes investment 0 and get_roi raise
    # ZeroDivisionError -- confirm whether that input should be rejected.
    roi = get_roi(investment, earnings)
    print(f"Your final balance is ${balance}. You won ${earnings}. The return on your investment of ${investment} was {roi}")
main()
"keegood8@gmail.com"
] | keegood8@gmail.com |
a1f99b984843bb214cc729eb084b6f32c4a7f041 | b7f91e2cbc1deea0b33f4293336876568e0b263d | /parctice/training_code/Armour_plate_detection_training/rpn_s/train.py | b1b9d79fc873a59f700e74ebc2b830009f5e909b | [] | no_license | wonggw/Robomaster_NTU_2018 | 196776d3b0a3b581a0c0db9618c1a9f59634ceb9 | cac54d22b6ac3f3a94790ed056660cdccb78558d | refs/heads/master | 2020-03-25T01:52:58.963643 | 2018-08-01T03:34:37 | 2018-08-01T03:34:37 | 143,262,046 | 0 | 0 | null | 2018-08-02T07:57:47 | 2018-08-02T07:57:47 | null | UTF-8 | Python | false | false | 2,161 | py | import numpy as np
import netpart
import data_reader
import model as M
import tensorflow as tf
import cv2
import time
import myconvertmod as cvt
import os
# Ensure the checkpoint directory exists before training starts.
if not os.path.exists('./model/'):
    os.mkdir('./model/')
# Data reader: 320x240 frames; scale_range/bounds drive the augmentation
# (exact semantics live in data_reader.reader -- not visible here).
reader = data_reader.reader(height=240,width=320,scale_range=[0.05,1.2],
    lower_bound=3,upper_bound=5,index_multiplier=2)
def draw(img,c,b,multip,name):
    """Draw every predicted box whose confidence exceeds -0.5, then show it.

    Args:
        img: BGR image to draw on (modified in place).
        c: batched confidence map -- assumed shape (1, rows, cols, 1);
           TODO confirm against the network output.
        b: batched box map -- assumed shape (1, rows, cols, 4) holding
           (dx, dy, w, h) offsets relative to each cell center; TODO confirm.
        multip: feature-map stride in input pixels (cell size).
        name: cv2 window title.
    """
    # Drop the batch dimension; only the first sample is drawn.
    c = c[0]
    b = b[0]
    row,col,_ = b.shape
    # print(b.shape,c.shape)
    # print(row,col)
    for i in range(row):
        for j in range(col):
            # print(i,j)
            if c[i][j][0]>-0.5:
                # box center = grid-cell center plus predicted offset
                x = int(b[i][j][0])+j*multip+multip//2
                y = int(b[i][j][1])+i*multip+multip//2
                w = int(b[i][j][2])
                h = int(b[i][j][3])
                cv2.rectangle(img,(x-w//2,y-h//2),(x+w//2,y+h//2),(0,255,0),2)
    cv2.imshow(name,img)
    cv2.waitKey(1)
def draw2(img,c,b,multip,name):
    """Draw the boxes of the 5 highest-confidence cells, then show the image.

    Same box decoding as ``draw``, but instead of thresholding it flattens
    the confidence map and keeps the top-5 indices.
    """
    c = c[0]
    b = b[0]
    row,col,_ = b.shape
    # flatten so argsort can rank every cell at once
    c = c.reshape([-1])
    ind = c.argsort()[-5:][::-1]
    for aaa in ind:
        # print(aaa)
        # recover 2-D cell coordinates from the flat index
        i = aaa//col
        j = aaa%col
        x = int(b[i][j][0])+j*multip+multip//2
        y = int(b[i][j][1])+i*multip+multip//2
        w = int(b[i][j][2])
        h = int(b[i][j][3])
        cv2.rectangle(img,(x-w//2,y-h//2),(x+w//2,y+h//2),(0,255,0),2)
    cv2.imshow(name,img)
    cv2.waitKey(1)
# Unpack the two-scale network outputs: boxes (b*) and confidences (c*).
b0,b1,c0,c1 = netpart.model_out
netout = [[b0,c0],[b1,c1]]
t1 = time.time()
MAX_ITER = 500000
with tf.Session() as sess:
    saver = tf.train.Saver()
    M.loadSess('./model/',sess)
    for i in range(MAX_ITER):
        img, train_dic = reader.get_img()
        # one optimization step per scale key present in this sample
        for k in train_dic:
            ls,_,b,c = sess.run([netpart.loss_functions[k],
                netpart.train_steps[k]] + netout[k],
                feed_dict={netpart.inpholder:[img],
                netpart.b_labholder:[train_dic[k][1]],
                netpart.c_labholder:[train_dic[k][0]]})
            if i%10==0:
                # crude ETA from the average time per iteration so far
                t2 = time.time()
                remain_time = float(MAX_ITER - i) / float(i+1) * (t2-t1)
                h,m,s = cvt.sec2hms(remain_time)
                print('Iter:\t%d\tLoss:\t%.6f\tK:%d\tETA:%d:%d:%d'%(i,ls,k,h,m,s))
            if i%100==0:
                # stride: 8 px for the fine scale (k==0), 32 px for the
                # coarse scale (k==1)
                if k==0:
                    multip = 8
                elif k==1:
                    multip = 32
                # multip = 8 if k==0 else 32
                draw(img.copy(),[train_dic[k][0]],[train_dic[k][1]],multip,'lab')
                draw2(img.copy(),c,b,multip,'pred')
        if i%2000==0 and i>0:
            # periodic checkpoint (skip iteration 0)
            saver.save(sess,'./model/MSRPN_%d.ckpt'%i)
"cy960823@outlook.com"
] | cy960823@outlook.com |
379fd53a5f423321a7a8eb0065c23cc0be712f0c | 563bdb036cb8cbfbd55fe05888c3ff6ec121b660 | /documenter2docset/documenter.py | e79a80bd15e0a601ca8531819ca511e3b1a8f5df | [] | no_license | jlumpe/documenter2docset | 8858697f8b26f465b166d373bdd093239fb00901 | ce5ef6572e51b783529c04a2794af67552867d3d | refs/heads/master | 2020-04-13T16:52:03.980076 | 2018-12-28T20:27:19 | 2018-12-28T20:27:19 | 163,332,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | """Work with documentation generated by Documenter.jl."""
import ast
def read_search_index(fh):
"""Read search_index.js.
Can't part strictly as JSON (even after removing the variable assignment
at the beginning because it isn't formatted correctly (comma after last
element of array). Use ast.literal_eval instead because it should match
Python dict/list literal syntax.
"""
start = 'var documenterSearchIndex = '
if fh.read(len(start)) != start:
raise ValueError('Failed to parse search index')
rest = fh.read()
data = ast.literal_eval(rest)
return data['docs']
| [
"mjlumpe@gmail.com"
] | mjlumpe@gmail.com |
c9ad645885da2ff6ead3ccefeee3ae71e844d592 | f7038be35e310a21958cd4e6bb9baadaf3c69943 | /python/shred/mango/mango/settings.py | 48564b45e78084e8f244583ee9facc75bd312419 | [] | no_license | code-machina/awesome-tutorials | d0208fadc06bae0c1865d50a8b1c74d2737dae13 | 6c813a1e9c343f8f99c2f6d5be804a829ec149ed | refs/heads/master | 2023-01-12T06:52:35.474881 | 2019-01-31T09:09:46 | 2019-01-31T09:09:46 | 160,049,994 | 1 | 0 | null | 2023-01-07T07:52:20 | 2018-12-02T13:25:58 | JavaScript | UTF-8 | Python | false | false | 3,684 | py | """
Django settings for mango project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pehq^ro26o4wb^6x)hog5m^*v%=!8yb^@d(1haun#v(dgqyb9g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'lemon',
    'djcelery',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mango.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
STATIC_ROOT = '/static/'
STATIC_URL = '/static/'
# Celery broker/result configuration (results stored in local sqlite)
CELERY_BROKER_URL = 'redis://localhost:6379'
# CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Seoul'
CELERY_RESULT_BACKEND = 'db+sqlite:///db.sqlite'
from datetime import timedelta
from celery.schedules import crontab
# from datetime import timedelta
# TODO: load a YML file here and build the schedule dynamically.
CELERYBEAT_SCHEDULE = {
    'add-every-30-seconds': {
        'task': 'tasks.task_sample_add',
        'schedule': timedelta(seconds=1),
        'args': (16, 16)
    },
}
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
import djcelery
djcelery.setup_loader()
"gbkim1988@gmail.com"
] | gbkim1988@gmail.com |
28fd248312ad2493793bbb3f26584351c889655e | 54d8a05e0238e96eb43e4893bacba024e490bf11 | /python-projects/algo_and_ds/shortest_cell_path_using_bfs_pramp_interview.py | 20d9c4271b29075a2acbe3379700acf8700381c3 | [] | no_license | infinite-Joy/programming-languages | 6ce05aa03afd7edeb0847c2cc952af72ad2db21e | 0dd3fdb679a0052d6d274d19040eadd06ae69cf6 | refs/heads/master | 2023-05-29T10:34:44.075626 | 2022-07-18T13:53:02 | 2022-07-18T13:53:02 | 30,753,185 | 3 | 5 | null | 2023-05-22T21:54:46 | 2015-02-13T11:14:25 | Jupyter Notebook | UTF-8 | Python | false | false | 1,621 | py | from collections import deque
def next_cell(grid, row, col, visited):
    """Yield in-bounds, unvisited neighbors of (row, col) whose grid value
    is 1 (walkable).

    Neighbors are generated in the order: top, left, bottom, right.
    """
    d_row = [-1, 0, 1, 0]
    d_col = [0, -1, 0, 1]
    for rr, cc in zip(d_row, d_col):
        nrow = row + rr
        ncol = col + cc
        if 0 <= nrow < len(grid) and 0 <= ncol < len(grid[0]):
            if (nrow, ncol) not in visited and grid[nrow][ncol] == 1:
                yield nrow, ncol
def shortestCellPath(grid, sr, sc, tr, tc):
    """Return the length of the shortest 4-directional path through cells
    equal to 1 from (sr, sc) to (tr, tc), or -1 if unreachable.

    Standard BFS; each queue entry carries its distance from the start.
    """
    # Generalized empty-grid guard: the original only handled [[]].
    if not grid or not grid[0]:
        return -1
    queue = deque([((sr, sc), 0)])
    # BUG FIX: the original used set((sr, sc)), which builds {sr, sc}
    # (a set of two ints) instead of a set containing the start tuple,
    # so the start cell could be re-enqueued.  Use a tuple-keyed set.
    visited = {(sr, sc)}
    while queue:
        (row, col), dist = queue.popleft()
        if row == tr and col == tc:
            return dist
        for nrow, ncol in next_cell(grid, row, col, visited):
            visited.add((nrow, ncol))
            queue.append(((nrow, ncol), dist + 1))
    return -1
grid = [[1, 1, 1, 1], [0, 0, 0, 1], [1, 1, 1, 1]]
print(shortestCellPath(grid, 0, 0, 2, 0))
grid = [[1, 1, 1, 1], [0, 0, 0, 1], [1, 0, 1, 1]]
print(shortestCellPath(grid, 0, 0, 2, 0))
"""
edge case [[]]
input:
grid = [[1, 1, 1, 1], [0, 0, 0, 1], [1, 1, 1, 1]]
sr = 0, sc = 0, tr = 2, tc = 0
output: 8
(The lines below represent this grid:)
1111
0001
1111
using a bfs should give the shortest path
time complexity m*n
space complexity m*n
queue
while queue is present:
take the present row , col and the current level
if target is found: return the level
iterate on the adj rows and cols that are not already visited:
append to the queue with level + 1
add the row , col to the visited
return -1
"""
| [
"joydeepubuntu@gmail.com"
] | joydeepubuntu@gmail.com |
47684ef9baf25752da28cc56054c26aabdfa5f87 | 84f0c73b9147e293a91161d3bd00fdfb8758a1e2 | /django/test1/test1/settings.py | e7105747f60d717d10860b8345297a7f6fe8ce78 | [] | no_license | TrellixVulnTeam/allPythonPractice_R8XZ | 73305067dfc7cfc22ae1a8283f7cb1397e41974b | 45f0885f6526807f1150c6b9e65d50ddf0f37d9f | refs/heads/master | 2023-03-15T19:19:14.020828 | 2019-07-25T16:18:24 | 2019-07-25T16:18:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,852 | py | """
Django settings for test1 project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Absolute path of the project directory
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-yiw=bcb^yy5ao!%n20(eb&=#ez74krb1!-+-e6j1)4swb-3=^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'booktest', # register the application
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'test1.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')], # configure the template directory
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'test1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh-hans' # use Chinese
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Shanghai' # use China (Shanghai) time zone
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| [
"focusdroid@126.com"
] | focusdroid@126.com |
172e7821ecc773081c07cf4efa28191b5d77b8e9 | a3b0e7acb6e0d7e73f5e369a17f367ac7caf83fb | /python/free_Code_CAmp/scientific_computing_for_everybody/ch_3/playground.py | 6f990236ce5c0b0436b2382899a7d87f0b97507e | [] | no_license | jadedocelot/Bin2rong | f9d35731ca7df50cfba36141d249db2858121826 | 314b509f7b3b3a6a5d6ce589dbc57a2c6212b3d7 | refs/heads/master | 2023-03-24T14:03:32.374633 | 2021-03-19T00:05:24 | 2021-03-19T00:05:24 | 285,387,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | import time # A
import random
x = (random.range(1,100))
while x < 100:
x = x + 20
time.sleep(0.19)
print(x)
x = x +12
time.sleep(0.23)
print(x)
x = x + 4
time.sleep(0.38)
print(x)
x = x + 70
time.sleep(0.60)
print(x)
break
print("End of code")
def winScore():
userName = input("Enter in you: ")
print("Great job \n",userName)
user_score = x
if x < 100:
print("\nYour score is :", x)
print("\nyou lose!\n")
else:
print("\nYour score is:", x)
print("\nYou have scored higher than 100, you win!\n")
winScore()
| [
"eabelortega@gmail.com"
] | eabelortega@gmail.com |
580ca49442935c52d5d05ec0a2f0dde826ee4536 | f9e4c2e9cd4a95dc228b384e2e8abadc9f1b0bda | /clubs/views.py | d9f78a81a32d48a5082518ede439e6e26525355d | [] | no_license | sanchitbareja/fratevents | 227adddd77c9a0055ccd74d5e0bf6f771790f8d3 | f50c8ccb40b8c9124b40e70d90c9190ef27a2fb7 | refs/heads/master | 2016-09-06T15:36:45.443412 | 2013-02-16T21:13:36 | 2013-02-16T21:13:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | # Create your views here.
from django.template import Context, RequestContext
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from django import forms
from django.http import HttpResponseRedirect
from clubs.models import Club
import os, time, simplejson
from datetime import datetime, date
#return json of everything in database
def getClubInfoJSON(request):
    """Return basic information about a Club as a JSON HTTP response.

    Expects a POST request carrying ``clubID``; looks the club up and
    serializes a fixed set of its fields.  On any failure (non-POST,
    missing key, unknown id, ...) the response is just
    ``{"success": false}``.
    """
    results = {'success': False}
    try:
        if request.method == u'POST':
            POST = request.POST
            club = Club.objects.get(id=POST['clubID'])
            results['id'] = club.id
            results['name'] = club.name
            results['description'] = club.description
            results['typeOfOrganization'] = club.typeOfOrganization
            results['founded'] = club.founded
            results['urlPersonal'] = club.urlPersonal
            results['image'] = club.image
            results['success'] = True
    except Exception:
        # Intentionally best-effort: any error leaves success=False.
        # (Was a bare ``except:``, which would also swallow SystemExit
        # and KeyboardInterrupt; the Python-2-style debug prints were
        # removed as well.)
        pass
    json_results = simplejson.dumps(results)
    return HttpResponse(json_results, mimetype='application/json')
| [
"sanchitbareja@gmail.com"
] | sanchitbareja@gmail.com |
77b0263d6f72098ae8615af9a9bca10d2fe3356e | d1752d73dd7dd8a7c0ea5ce3741f18b9c9073af7 | /solutions/Day17_MoreExceptions/Day17_MoreExceptions.py | eedced4a6bfe57d7cedc22b72ec585dd28954258 | [
"MIT"
] | permissive | arsho/Hackerrank_30_Days_of_Code_Solutions | 58ee9277854d67e967e07d62ddbfd155beefd35b | 840e5cbe8025b4488a97d1a51313c19c4e7e91ed | refs/heads/master | 2020-05-04T15:26:49.304023 | 2019-04-03T14:31:29 | 2019-04-03T14:31:29 | 179,241,241 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | '''
Title : Day 17: More Exceptions
Domain : Tutorials
Author : Ahmedur Rahman Shovon
Created : 03 April 2019
'''
#Write your code here
class Calculator(object):
def power(self, n, p):
if n < 0 or p < 0:
raise ValueError("n and p should be non-negative")
return n**p
myCalculator=Calculator()
T=int(input())
for i in range(T):
n,p = map(int, input().split())
try:
ans=myCalculator.power(n,p)
print(ans)
except Exception as e:
print(e)
| [
"shovon.sylhet@gmail.com"
] | shovon.sylhet@gmail.com |
6e20ec43fbcfeb13a9b90f75a1e1d161c23035d5 | 27eeee7dc09efc8911ba32e25fc18e2ea79b7843 | /skedulord/job.py | 2c622d0771f9e4e2a9298ff8841c1d6ebc14b299 | [
"MIT"
] | permissive | koaning/skedulord | fe9253e341dbd8442688508954c032fe74a5ad46 | 78dc0e630743a059573c34efdd52104586b2c4de | refs/heads/main | 2022-06-03T18:04:00.279729 | 2022-05-21T12:35:26 | 2022-05-21T12:35:26 | 215,561,785 | 65 | 4 | MIT | 2022-05-01T12:29:48 | 2019-10-16T13:58:06 | Python | UTF-8 | Python | false | false | 2,412 | py | import io
import json
import time
import uuid
import pathlib
import subprocess
import datetime as dt
from skedulord.common import job_name_path, log_heartbeat
from pathlib import Path
class JobRunner:
"""
Object in charge of running a job and logging it.
"""
def __init__(self, name, cmd, retry=3, wait=60):
self.name = name
self.cmd = cmd
self.retry = retry
self.wait = wait
self.start_time = str(dt.datetime.now())[:19].replace(" ", "T")
self.logpath = Path(job_name_path(name)) / f"{self.start_time}.txt"
pathlib.Path(self.logpath).parent.mkdir(parents=True, exist_ok=True)
pathlib.Path(self.logpath).touch()
self.file = self.logpath.open("a")
def _attempt_cmd(self, command, name, run_id):
tries = 1
stop = False
while not stop:
info = {"name": name, "command": command, "run_id": run_id, "attempt": tries, "timestamp": str(dt.datetime.now())}
self.file.writelines([json.dumps(info), "\n"])
output = subprocess.run(
command.split(" "),
cwd=str(pathlib.Path().cwd()),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
universal_newlines=True,
)
for line in output.stdout.split("\n"):
self.file.writelines([line, "\n"])
if output.returncode == 0:
stop = True
else:
tries += 1
if tries > self.retry:
stop = True
else:
time.sleep(self.wait)
return "fail" if tries > self.retry else "success"
def run(self):
"""
Run and log a command.
"""
run_id = str(uuid.uuid4())[:8]
start_time = self.start_time
status = self._attempt_cmd(command=self.cmd, name=self.name, run_id=run_id)
endtime = str(dt.datetime.now())[:19]
job_name_path(self.name).mkdir(parents=True, exist_ok=True)
logpath = str(job_name_path(self.name) / f"{start_time}.txt")
log_heartbeat(
run_id=run_id,
name=self.name,
command=self.cmd,
status=status,
tic=start_time.replace("T", " "),
toc=endtime,
logpath=logpath
)
| [
"vincentwarmerdam@gmail.com"
] | vincentwarmerdam@gmail.com |
3071abc122ec82db2b2ba1766208aef9f85ec69c | 64546da2b39cf96a490a0b73ce09166e2b704da2 | /backend/course/models.py | e6616fc039c250373458eec7bec5f2d3a47329fa | [] | no_license | crowdbotics-apps/for-later-19794 | 3c547dfef97bd6631929ba3a09954d8edb6bd0de | 501d0f3fd4d58775c964e08e1fe75d69bf759520 | refs/heads/master | 2022-12-06T07:56:19.275934 | 2020-08-26T15:34:01 | 2020-08-26T15:34:01 | 290,533,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,850 | py | from django.conf import settings
from django.db import models
class Recording(models.Model):
"Generated Model"
event = models.ForeignKey(
"course.Event", on_delete=models.CASCADE, related_name="recording_event",
)
media = models.URLField()
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="recording_user",
)
published = models.DateTimeField()
class Module(models.Model):
"Generated Model"
course = models.ForeignKey(
"course.Course", on_delete=models.CASCADE, related_name="module_course",
)
title = models.CharField(max_length=256,)
description = models.TextField()
class Category(models.Model):
"Generated Model"
name = models.CharField(max_length=256,)
class PaymentMethod(models.Model):
"Generated Model"
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="paymentmethod_user",
)
primary = models.BooleanField()
token = models.CharField(max_length=256,)
class SubscriptionType(models.Model):
"Generated Model"
name = models.CharField(max_length=256,)
class Event(models.Model):
"Generated Model"
name = models.CharField(max_length=256,)
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="event_user",
)
date = models.DateTimeField()
class Enrollment(models.Model):
"Generated Model"
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="enrollment_user",
)
course = models.ForeignKey(
"course.Course", on_delete=models.CASCADE, related_name="enrollment_course",
)
class Subscription(models.Model):
"Generated Model"
subscription_type = models.ForeignKey(
"course.SubscriptionType",
on_delete=models.CASCADE,
related_name="subscription_subscription_type",
)
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="subscription_user",
)
class Course(models.Model):
"Generated Model"
author = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="course_author",
)
title = models.CharField(null=True, blank=True, max_length=256,)
description = models.TextField(null=True, blank=True,)
categories = models.ManyToManyField(
"course.Category", blank=True, related_name="course_categories",
)
class Lesson(models.Model):
"Generated Model"
module = models.ForeignKey(
"course.Module", on_delete=models.CASCADE, related_name="lesson_module",
)
title = models.CharField(max_length=256,)
description = models.TextField()
media = models.URLField()
class Group(models.Model):
"Generated Model"
name = models.CharField(max_length=256,)
# Create your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
e7b886977f81669168fde729b360287c320f19a2 | b885eaf4df374d41c5a790e7635726a4a45413ca | /LeetCode/Session3/ClosestBSTValue.py | 08a5eac84d8b1f8292686d15c2ebb4215e9c68ff | [
"MIT"
] | permissive | shobhitmishra/CodingProblems | 2a5de0850478c3c2889ddac40c4ed73e652cf65f | 0fc8c5037eef95b3ec9826b3a6e48885fc86659e | refs/heads/master | 2021-01-17T23:22:42.442018 | 2020-04-17T18:25:24 | 2020-04-17T18:25:24 | 84,218,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import sys
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def closestValue(self, root: TreeNode, target: float) -> int:
self.minDiffValue = root.val
self.closestValueHelper(root, target)
return self.minDiffValue
def closestValueHelper(self, node: TreeNode, target: float):
if not node:
return
currentDiff = abs(node.val - target)
if currentDiff < abs(self.minDiffValue - target):
self.minDiffValue = node.val
if target < node.val:
self.closestValueHelper(node.left, target)
else:
self.closestValueHelper(node.right, target)
ob = Solution()
root = TreeNode(4)
root.left = TreeNode(2)
root.left.left = TreeNode(1)
root.left.right = TreeNode(3)
root.right = TreeNode(5)
print(ob.closestValue(root, 3.14)) | [
"shmishra@microsoft.com"
] | shmishra@microsoft.com |
8f523c223becca901b73198037ee041f2607b3cf | 1f08436bab6cd03bcfb257e8e49405cbc265195a | /12_File/Sample/io_ex10.py | 69cc54f04c4f232c4450593d34e968477c1e7d3d | [] | no_license | kuchunbk/PythonBasic | e3ba6322f256d577e37deff09c814c3a374b93b2 | a87135d7a98be8830d30acd750d84bcbf777280b | refs/heads/master | 2020-03-10T04:28:42.947308 | 2018-04-17T04:25:51 | 2018-04-17T04:25:51 | 129,192,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | '''Question:
Write a Python program to count the frequency of words in a file.
'''
# Python code:
from collections import Counter
def word_count(fname):
with open(fname) as f:
return Counter(f.read().split())
print("Number of words in the file :",word_count("test.txt"))
'''Output sample:
Number of words in the file : Counter({'this': 7, 'Append': 5, 'text.': 5, 'text.Append': 2, 'Welcome': 1, 'to
': 1, 'w3resource.com.': 1})
''' | [
"kuchunbk@gmail.com"
] | kuchunbk@gmail.com |
a6da60cb6269f0ba182ed3e312c8156f7620dea8 | e1386d15b6e67c2e4e301bfb6d2fdfa817ac082d | /tests/unit_tests/handlers/test_delivery_utility_handlers.py | 2610ffaf7d85d9443a29d004dceb09a4dcdee112 | [
"MIT"
] | permissive | arteria-project/arteria-delivery | 002272f5371a77968fad15061ec6aa00068fab14 | 46830f5b891dabb3a1352719842d1a9706641032 | refs/heads/master | 2023-09-04T07:53:12.783287 | 2023-08-04T13:26:41 | 2023-08-04T13:26:41 | 71,244,793 | 0 | 8 | MIT | 2023-08-09T14:52:04 | 2016-10-18T12:15:20 | Python | UTF-8 | Python | false | false | 653 | py |
import json
from tornado.testing import *
from tornado.web import Application
from delivery.app import routes
from delivery import __version__ as checksum_version
from tests.test_utils import DummyConfig
class TestUtilityHandlers(AsyncHTTPTestCase):
API_BASE = "/api/1.0"
def get_app(self):
return Application(
routes(
config=DummyConfig()))
def test_version(self):
response = self.fetch(self.API_BASE + "/version")
expected_result = {"version": checksum_version}
self.assertEqual(response.code, 200)
self.assertEqual(json.loads(response.body), expected_result)
| [
"johan.dahlberg@medsci.uu.se"
] | johan.dahlberg@medsci.uu.se |
a7eafae3cbdf9b3ab0317e0537a060480946ea17 | 4daa80570478d1d4a72ced0b0c288db39df7bbbd | /pychron/graph/stream_graph.py | 695178c1359db85f1c757c9bf874ed7b2f15c6b4 | [
"Apache-2.0"
] | permissive | waffle-iron/pychron | 3f98fad34788371f98ccabf164d03b7d77b0d9ce | 14504782c8f73a6267cd8a59f7d9a609e1df92e3 | refs/heads/develop | 2020-12-29T18:51:44.245858 | 2016-04-21T04:18:09 | 2016-04-21T04:18:09 | 56,742,046 | 0 | 0 | null | 2016-04-21T04:18:08 | 2016-04-21T04:18:07 | null | UTF-8 | Python | false | false | 12,391 | py | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from pychron.core.ui import set_qt
set_qt()
# =============enthought library imports=======================
from pyface.timer.api import do_after as do_after_timer
# =============standard library imports ========================
from numpy import hstack, Inf
import time
# =============local library imports ==========================
# from pychron.graph.editors.stream_plot_editor import StreamPlotEditor
from stacked_graph import StackedGraph
from graph import Graph
MAX_LIMIT = int(-1 * 60 * 60 * 24)
def time_generator(start):
"""
"""
yt = start
prev_time = 0
while 1:
current_time = time.time()
if prev_time != 0:
interval = current_time - prev_time
yt += interval
yield yt
prev_time = current_time
class StreamGraph(Graph):
"""
"""
# plot_editor_klass = StreamPlotEditor
global_time_generator = None
cur_min = None
cur_max = None
# track_y_max = Bool(True)
# track_y_min = Bool(True)
#
# track_x_max = Bool(True)
# track_x_min = Bool(True)
#
#
# force_track_x_flag = False
track_y_max = None
track_y_min = None
track_x_max = None
track_x_min = None
force_track_x_flag = None
def __init__(self, *args, **kw):
super(StreamGraph, self).__init__(*args, **kw)
self.scan_delays = []
self.time_generators = []
self.data_limits = []
self.scan_widths = []
def clear(self):
self.scan_delays = []
self.time_generators = []
self.data_limits = []
self.scan_widths = []
self.cur_min = []
self.cur_max = []
self.track_x_max = True
self.track_x_min = True
self.track_y_max = []
self.track_y_min = []
self.force_track_x_flag = False
super(StreamGraph, self).clear()
def new_plot(self, **kw):
"""
"""
dl = kw.get('data_limit', 500)
sw = kw.get('scan_width', 60)
self.scan_widths.append(sw)
self.data_limits.append(dl)
self.cur_min.append(Inf)
self.cur_max.append(-Inf)
self.track_y_max.append(True)
self.track_y_min.append(True)
args = super(StreamGraph, self).new_plot(**kw)
self.set_x_limits(min_=0, max_=sw * 1.05, plotid=len(self.plots) - 1)
return args
def update_y_limits(self, plotid=0, **kw):
ma = -1
mi = 1e10
for _k, v in self.plots[plotid].plots.iteritems():
ds = v[0].value.get_data()
try:
ma = max(ma, max(ds))
mi = min(mi, min(ds))
except ValueError:
return
if not self.track_y_max[plotid]:
ma = None
if not self.track_y_min[plotid]:
mi = None
self.set_y_limits(min_=mi, max_=ma, plotid=plotid, pad=5, **kw)
def set_scan_width(self, v, plotid=0):
self.scan_widths[plotid] = v
def set_data_limits(self, d, plotid=None):
if plotid is None:
for i in range(len(self.scan_delays)):
self.data_limits[i] = d
else:
self.data_limits[plotid] = d
def _set_xlimits(self, ma, plotid):
sw = self.scan_widths[plotid]
if ma < sw:
mi = 0
ma = sw * 1.05
else:
mi = ma - sw
ma += sw * 0.05
self.set_x_limits(max_=ma,
min_=mi,
plotid=plotid)
def record(self, y, x=None, series=0, plotid=0, track_x=True, track_y=True):
xn, yn = self.series[plotid][series]
plot = self.plots[plotid]
xd = plot.data.get_data(xn)
yd = plot.data.get_data(yn)
if x is None:
try:
tg = self.time_generators[plotid]
except IndexError:
tg = time_generator(0)
self.time_generators.append(tg)
nx = tg.next()
else:
nx = x
dl = self.data_limits[plotid]
if self.force_track_x_flag or (track_x and (self.track_x_min or self.track_x_max)):
self._set_xlimits(nx, plotid)
if track_y and (self.track_y_min[plotid] or self.track_y_max[plotid]):
if not self.track_y_max[plotid]:
ma = None
else:
ma = self.cur_max[plotid]
if not self.track_y_min[plotid]:
mi = None
else:
mi = self.cur_min[plotid]
self.set_y_limits(max_=ma,
min_=mi,
pad='0.1',
plotid=plotid)
lim = -dl
new_xd = hstack((xd[lim:], [nx]))
new_yd = hstack((yd[lim:], [float(y)]))
plot.data.set_data(xn, new_xd)
plot.data.set_data(yn, new_yd)
self.cur_max[plotid] = max(self.cur_max[plotid], max(new_yd))
self.cur_min[plotid] = min(self.cur_min[plotid], min(new_yd))
return nx
def record_multiple(self, ys, plotid=0, track_y=True):
tg = self.global_time_generator
if tg is None:
tg = time_generator(0)
self.global_time_generator = tg
x = tg.next()
for i, yi in enumerate(ys):
self.record(yi, x=x, series=i, track_x=False, track_y=track_y)
ma = max(ys)
mi = min(ys)
if ma < self.cur_max[plotid]:
self.cur_max[plotid] = -Inf
if mi > self.cur_min[plotid]:
self.cur_min[plotid] = Inf
self._set_xlimits(x, plotid=plotid)
return x
class StreamStackedGraph(StreamGraph, StackedGraph):
pass
if __name__ == '__main__':
from traits.has_traits import HasTraits
from traits.trait_types import Button
import random
from traitsui.view import View
class Demo(HasTraits):
test = Button
def _test_fired(self):
s = StreamGraph()
s.new_plot(scan_width=5)
s.new_series(type='scatter')
s.new_series(type='line', plotid=0)
s.new_series(type='line', plotid=0)
s.edit_traits()
self.g = s
do_after_timer(1000, self._iter)
def _iter(self):
st = time.time()
ys = [random.random(),random.random(),random.random()]
self.g.record_multiple(ys)
# self.g.record(random.random())
do_after_timer(999.5 - (time.time() - st) * 1000, self._iter)
def traits_view(self):
v = View('test')
return v
d = Demo()
d.configure_traits()
# ============= EOF ====================================
# def record_multiple(self, ys, plotid=0, scalar=1, track_x=True, **kw):
#
# tg = self.global_time_generator
# if tg is None:
# tg = time_generator(self.scan_delays[plotid])
# self.global_time_generator = tg
#
# x = tg.next() * scalar
# for i, yi in enumerate(ys):
# kw['track_x'] = False
# self.record(yi, x=x, series=i, **kw)
#
# ma = max(ys)
# mi = min(ys)
# if ma < self.cur_max[plotid]:
# self.cur_max[plotid] = -Inf
# if mi > self.cur_min[plotid]:
# self.cur_min[plotid] = Inf
#
# if track_x:
# # dl = self.data_limits[plotid]
# # mi = max(1, x - dl * self.scan_delays[plotid])
# # ma = max(x*1.05, mi+)
# sw = self.scan_widths[plotid]
# if sw:
# ma = max(x*1.05, sw)
# mi = 0
# if ma > sw:
# mi = ma-sw
# else:
# ma = None
# dl = self.data_limits[plotid]
# mi = max(1, x - dl * self.scan_delays[plotid])
#
# self.set_x_limits(max_=ma,
# min_=mi,
# plotid=plotid)
# return x
#
# def record(self, y, x=None, series=0, plotid=0,
# track_x=True, track_y=True, do_after=None, track_y_pad=5,
# aux=False, pad=0.1, **kw):
#
# xn, yn = self.series[plotid][series]
#
# plot = self.plots[plotid]
#
# xd = plot.data.get_data(xn)
# yd = plot.data.get_data(yn)
#
# if x is None:
# try:
# tg = self.time_generators[plotid]
# except IndexError:
# tg = time_generator(self.scan_delays[plotid])
# self.time_generators.append(tg)
#
# nx = tg.next()
# else:
# nx = x
#
# ny = float(y)
# # update raw data
# # rx = self.raw_x[plotid][series]
# # ry = self.raw_y[plotid][series]
# #
# # self.raw_x[plotid][series] = hstack((rx[MAX_LIMIT:], [nx]))
# # self.raw_y[plotid][series] = hstack((ry[MAX_LIMIT:], [ny]))
#
# dl = self.data_limits[plotid]
# sd = self.scan_delays[plotid]
# sw = self.scan_widths[plotid]
#
# pad = dl * pad
# # lim = MAX_LIMIT
# # pad = 100
# # print lim, nx, ny
# lim = -dl * sd - 1000
# new_xd = hstack((xd[lim:], [nx]))
# new_yd = hstack((yd[lim:], [ny]))
# # print new_xd
# self.cur_max[plotid] = max(self.cur_max[plotid], max(new_yd))
# self.cur_min[plotid] = min(self.cur_min[plotid], min(new_yd))
#
# def _record_():
# if track_x and (self.track_x_min or self.track_x_max) \
# or self.force_track_x_flag:
# ma = new_xd[-1]
# if not sw:
# sd = self.scan_delays[plotid]
# mi = ma - dl * sd + pad
# if self.force_track_x_flag or \
# ma >= dl * sd - pad:
#
# if self.force_track_x_flag:
# self.force_track_x_flag = False
# ma = dl * sd
#
# if not self.track_x_max:
# ma = None
# else:
# ma += pad
#
# if not self.track_x_min:
# mi = None
# else:
# mi = max(1, mi)
# else:
# ma = max(ma*1.05, sw)
# mi = 0
# if ma > sw:
# mi = ma-sw
#
# self.set_x_limits(max_=ma,
# min_=mi,
# plotid=plotid)
#
# if track_y and (self.track_y_min[plotid] or self.track_y_max[plotid]):
# if isinstance(track_y, tuple):
# mi, ma = track_y
# if ma is None:
# ma = self.cur_max[plotid]
#
# if mi is None:
# mi = self.cur_min[plotid]
#
# else:
# if not self.track_y_max[plotid]:
# ma = None
# else:
# ma = self.cur_max[plotid]
#
# if not self.track_y_min[plotid]:
# mi = None
# else:
# mi = self.cur_min[plotid]
# self.set_y_limits(max_=ma,
# min_=mi,
# plotid=plotid,
# pad=track_y_pad,
# force=False
# )
#
# if aux:
# self.add_datum_to_aux_plot((nx, ny), plotid, series)
# else:
# plot.data.set_data(xn, new_xd)
# plot.data.set_data(yn, new_yd)
# # self.redraw()
#
# if do_after:
# do_after_timer(do_after, _record_)
# else:
# _record_()
#
# return nx
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
4ab772d063000b2f73af815811ee2b7718613d44 | 57fa7d6820ca63d14f6568adb9185b8a8ea47589 | /ViewClass/ViewClass/settings.py | 26d3ce9d89c07c63796efac306a53f7eadde218c | [] | no_license | kunjabijukchhe/Web-Development | 0dd0a5f415adb863f96c1552d90b6b7a282b6945 | e9bd5c5cc4b0f12f2f1714986c612494be9ab8ea | refs/heads/master | 2020-09-23T04:43:39.118376 | 2020-07-21T08:21:30 | 2020-07-21T08:21:30 | 225,405,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,174 | py | """
Django settings for ViewClass project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '761kx+tbqwnwrojw3&vzx11$1!%6f+yrh&8yb5v^q%!ky=3if='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'reviseApp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ViewClass.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ViewClass.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"bijukchhekunja@gamil.com"
] | bijukchhekunja@gamil.com |
532491ab518da9198fb188f47602c394dd47c69a | f4c4546f21046ddfd109a0dd34005ac4872f123d | /Django/app05/manage.py | b1e26b065ae23445230da70398a74cee7225113a | [] | no_license | wuzhisheng/Python | ebbeacc81881b65d6085b8f4bf10e15a25549ab0 | 84930c2eb5fb0397d546819effef0d879055e2c8 | refs/heads/master | 2022-01-17T14:57:08.926481 | 2021-12-31T09:52:01 | 2021-12-31T09:52:01 | 146,844,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app05.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"806215829@qq.com"
] | 806215829@qq.com |
353478d1c692ca136b3015ce5ce4e0220ef2efe4 | 49f23f530d0cda7aadbb27be9c5bdefaa794d27f | /server/common_models/__init__.py | 4b63cc4511ffc6f06c746a09ef3301a5c735fe58 | [
"MIT"
] | permissive | Soopro/totoro | 198f3a51ae94d7466136ee766be98cb559c991f1 | 6be1af50496340ded9879a6450c8208ac9f97e72 | refs/heads/master | 2020-05-14T09:22:21.942621 | 2019-08-03T20:55:23 | 2019-08-03T20:55:23 | 181,738,167 | 0 | 1 | MIT | 2019-10-29T13:43:24 | 2019-04-16T17:42:16 | Python | UTF-8 | Python | false | false | 191 | py | # coding=utf-8
from __future__ import absolute_import
from .user import *
from .media import *
from .book import *
from .category import *
from .configuration import *
from .notify import *
| [
"redy.ru@gmail.com"
] | redy.ru@gmail.com |
1ab5eea08126195608143cbf70088bb0be88adc8 | ae844174eff5d14b8627ef8b32e66713f03772c8 | /Notes/Lec19/testing with nose.py | 86c5c3fa2283cf61d402cbbe75c779e3211c39b5 | [] | no_license | tayloa/CSCI1100_Fall2015 | 1bd6250894083086437c7defceddacf73315b83b | 4ca1e6261e3c5d5372d3a097cb6c8601a2a8c1c6 | refs/heads/master | 2021-01-22T22:43:55.301293 | 2017-05-30T04:52:21 | 2017-05-30T04:52:21 | 92,784,700 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | import nose
from search import *
def tetst_st1_1():
assert str1([4,1,3,2]) == (1,2)
def tetst_st1_1():
assert str1([1,2,3,4]) ==
def tetst_st1_1():
assert str1([1,2,3,1]) | [
"halfnote1004@gmail.com"
] | halfnote1004@gmail.com |
7ccab5bc4ae69ea234e05faaef3b39949464da00 | 7000895fad6f4c23084122ef27b3292d5e57df9f | /tests/core/test_TransactionMetadata.py | be05089d68a8de1c9fc5e458eb05f7072f56148b | [
"MIT"
] | permissive | jack3343/xrd-core | 1302cefe2a231895a53fcef73e558cdbc1196884 | 48a6d890d62485c627060b017eadf85602268caf | refs/heads/master | 2022-12-15T07:36:16.618507 | 2020-08-27T09:21:36 | 2020-08-27T09:21:36 | 290,652,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,229 | py | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from unittest import TestCase
from mock import MagicMock
from xrd.core import config
from xrd.core.TransactionMetadata import TransactionMetadata
from xrd.core.misc import logger, db
from xrd.core.State import State
from xrd.core.txs.TransferTransaction import TransferTransaction
from xrd.core.Block import Block
from tests.misc.helper import set_xrd_dir, get_alice_xmss, get_some_address
logger.initialize_default()
class TestTransactionMetadata(TestCase):
def setUp(self):
with set_xrd_dir('no_data'):
self.state = State()
self.m_db = MagicMock(name='mock DB', autospec=db.DB)
def test_rollback_tx_metadata(self):
alice_xmss = get_alice_xmss()
tx1 = TransferTransaction.create(addrs_to=[get_some_address(1), get_some_address(2)],
amounts=[1, 2],
message_data=None,
fee=0,
xmss_pk=alice_xmss.pk)
block = Block.create(dev_config=config.dev,
block_number=5,
prev_headerhash=b'',
prev_timestamp=10,
transactions=[tx1],
miner_address=b'',
seed_height=0,
seed_hash=None)
TransactionMetadata.update_tx_metadata(self.state, block=block, batch=None)
tx_metadata = TransactionMetadata.get_tx_metadata(self.state, tx1.txhash)
self.assertEqual(tx_metadata[0].to_json(), tx1.to_json())
TransactionMetadata.rollback_tx_metadata(self.state, block, None)
self.assertIsNone(TransactionMetadata.get_tx_metadata(self.state, tx1.txhash))
def test_update_tx_metadata(self):
alice_xmss = get_alice_xmss()
tx = TransferTransaction.create(addrs_to=[get_some_address(1), get_some_address(2)],
amounts=[1, 2],
message_data=None,
fee=0,
xmss_pk=alice_xmss.pk)
block_number = 5
TransactionMetadata.put_tx_metadata(self.state, tx, block_number, 10000, None)
tx_metadata = TransactionMetadata.get_tx_metadata(self.state, tx.txhash)
self.assertEqual(tx_metadata[0].to_json(), tx.to_json())
self.assertEqual(tx_metadata[1], block_number)
def test_remove_tx_metadata(self):
self.assertIsNone(TransactionMetadata.get_tx_metadata(self.state, b'test1'))
alice_xmss = get_alice_xmss()
tx = TransferTransaction.create(addrs_to=[get_some_address(1), get_some_address(2)],
amounts=[1, 2],
message_data=None,
fee=0,
xmss_pk=alice_xmss.pk)
block_number = 5
TransactionMetadata.put_tx_metadata(self.state, tx, block_number, 10000, None)
tx_metadata = TransactionMetadata.get_tx_metadata(self.state, tx.txhash)
self.assertEqual(tx_metadata[0].to_json(), tx.to_json())
self.assertEqual(tx_metadata[1], block_number)
TransactionMetadata.remove_tx_metadata(self.state, tx, None)
self.assertIsNone(TransactionMetadata.get_tx_metadata(self.state, tx.txhash))
def test_put_tx_metadata(self):
self.assertIsNone(TransactionMetadata.get_tx_metadata(self.state, b'test1'))
alice_xmss = get_alice_xmss()
tx = TransferTransaction.create(addrs_to=[get_some_address(1), get_some_address(2)],
amounts=[1, 2],
message_data=None,
fee=0,
xmss_pk=alice_xmss.pk)
block_number = 5
TransactionMetadata.put_tx_metadata(self.state, tx, block_number, 10000, None)
tx_metadata = TransactionMetadata.get_tx_metadata(self.state, tx.txhash)
self.assertEqual(tx_metadata[0].to_json(), tx.to_json())
self.assertEqual(tx_metadata[1], block_number)
def test_get_tx_metadata(self):
self.assertIsNone(TransactionMetadata.get_tx_metadata(self.state, b'test1'))
alice_xmss = get_alice_xmss()
tx = TransferTransaction.create(addrs_to=[get_some_address(1), get_some_address(2)],
amounts=[1, 2],
message_data=None,
fee=0,
xmss_pk=alice_xmss.pk)
block_number = 5
timestamp = 10000
TransactionMetadata.put_tx_metadata(self.state, tx, block_number, timestamp, None)
tx_metadata = TransactionMetadata.get_tx_metadata(self.state, tx.txhash)
self.assertEqual(tx_metadata[0].to_json(), tx.to_json())
self.assertEqual(tx_metadata[1], block_number)
| [
"70303530+jack3343@users.noreply.github.com"
] | 70303530+jack3343@users.noreply.github.com |
0b4fbe315a453bb13ddfe9bc6c4cee6d06ad3d86 | 27b86f422246a78704e0e84983b2630533a47db6 | /exploration/tools/diff_dxf_files.py | 28162397f11ab0d070fc9d40ac52f5a90195c50b | [
"MIT"
] | permissive | mozman/ezdxf | 7512decd600896960660f0f580cab815bf0d7a51 | ba6ab0264dcb6833173042a37b1b5ae878d75113 | refs/heads/master | 2023-09-01T11:55:13.462105 | 2023-08-15T11:50:05 | 2023-08-15T12:00:04 | 79,697,117 | 750 | 194 | MIT | 2023-09-14T09:40:41 | 2017-01-22T05:55:55 | Python | UTF-8 | Python | false | false | 1,658 | py | # Copyright (c) 2023, Manfred Moitzi
# License: MIT License
from typing import Optional, Iterable
from ezdxf.lldxf.tags import Tags
from ezdxf.lldxf.tagger import tag_compiler
from ezdxf.tools.rawloader import raw_structure_loader
from ezdxf.tools.difftags import diff_tags, print_diff, OpCode
FILE1 = r"C:\Users\mozman\Desktop\Outbox\906_polylines.dxf"
FILE2 = r"C:\Users\mozman\Desktop\Outbox\906_copy.dxf"
def get_handle(tags: Tags):
try:
return tags.get_handle()
except ValueError:
return "0"
def cmp_section(sec1, sec2):
for e1 in sec1:
handle = get_handle(e1)
if handle is None or handle == "0":
continue
e2 = entity_tags(sec2, handle)
if e2 is None:
print(f"entity handle #{handle} not found in second file")
continue
e1 = Tags(tag_compiler(iter(e1)))
a, b = e2, e1
diff = list(diff_tags(a, b, ndigits=6))
has_diff = any(op.opcode != OpCode.equal for op in diff)
if has_diff:
print("-"*79)
print(f"comparing {e1.dxftype()}(#{handle})")
print_diff(a, b, diff)
def cmp_dxf_files(filename1: str, filename2: str):
doc1 = raw_structure_loader(filename1)
doc2 = raw_structure_loader(filename2)
for section in ["TABLES", "BLOCKS", "ENTITIES", "OBJECTS"]:
cmp_section(doc1[section], doc2[section])
def entity_tags(entities: Iterable[Tags], handle: str) -> Optional[Tags]:
for e in entities:
if get_handle(e) == handle:
return Tags(tag_compiler(iter(e)))
return None
if __name__ == "__main__":
cmp_dxf_files(FILE1, FILE2)
| [
"me@mozman.at"
] | me@mozman.at |
656f52ac462cf2a6b5c44a4cfb10a5417245ad90 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/9/usersdata/82/5635/submittedfiles/crianca.py | a715f37838ff16a13db79b479c9046f643398dd8 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | # -*- coding: utf-8 -*-
from __future__ import division
#ENTRADA
P1 = input ('Digite o valor de P1:')
C1 = input ('Digite o valor de C1:')
P2 = input ('Digite o valor de P2:')
C2 = input ('Digite o valor de C2:')
#PROCESSAMNETO E SAIDA | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
26893bbbc95fc0c9dadd5b947bef410efc052c39 | 40b0af3099e8c2d0ce30d8ecce842ea8122be8b2 | /BASIC SYNTAX, CONDITIONAL STATEMENTS AND LOOPS/number_between_1_100.py | 6e45b2c407889b183c8950d48ab3ad6b8688ba39 | [] | no_license | milenpenev/Fundamentals | 6c1b1f5e26c4bf4622b1de6a4cc9a3b6a1f7a44b | 7754d1e4fba9e8f24b723f434f01f252c137eb60 | refs/heads/main | 2023-04-08T12:58:23.899238 | 2021-04-03T07:42:48 | 2021-04-03T07:42:48 | 350,467,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | number = float(input())
while number < 1 or number > 100:
number = float(input())
print(f"The number {number} is between 1 and 100") | [
"milennpenev@gmail.com"
] | milennpenev@gmail.com |
6988bd8c6b1fe3ef303ee68b28980a98319aebca | 69ed8c9140be238fe3499dfbd27ab4a3fcfcbec8 | /webempresa/blog/views.py | bccc7d66a0fe0be9734e9a26fefacd9dfe9ad3d8 | [] | no_license | Garavirod/CoffeeWeb | b8b1f3fa2373b2e45dbc393589edd18c17964f8c | e2250a416d0f2bceb3a68617133ae212d835a26c | refs/heads/master | 2022-12-18T23:40:26.337613 | 2019-08-01T19:18:38 | 2019-08-01T19:18:38 | 187,440,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | from django.shortcuts import render, get_object_or_404
from .models import Post, Category
# Create your views here.
def blog(request):
posts = Post.objects.all()
return render(request,"blog/blog.html",{'posts':posts})
def category(request,category_id):
#el 'get' nos permite obtener un único registro filtrado ppor una serie de campos por el parametro
# category = Category.objects.get(id=category_id)
# Los paraámetros serán el modelo y el identificador de la categoría
#Buscamos a la inversa todas la entradas que tengan asignada esta categoría
category = get_object_or_404(Category,id=category_id) #Nos muestra el error 404 según el modelo
posts = Post.objects.filter(categories=category) #Filatra los datos por categoría
return render(request,"blog/category.html",{'category':category}) | [
"rodrigogarciaavila26@gmail.com"
] | rodrigogarciaavila26@gmail.com |
fbaa6d49368db0becbe90cabc3be773834120c82 | ad6eb2236acdf525c10af6c1cf62e877039301c2 | /lfs_order_numbers/models.py | 0ec1751dcead67933630dd17b9a6b1905885d42a | [] | no_license | diefenbach/lfs-order-numbers | 1dad836eded78d830cd6af79d1ce3fa2b9357640 | f9c3342dc7ebedfa286ac927ba84b433c2cbbc80 | refs/heads/master | 2021-05-25T11:14:21.547104 | 2017-02-23T10:12:19 | 2017-02-23T10:12:19 | 4,285,159 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from lfs.plugins import OrderNumberGenerator as Base
class OrderNumberGenerator(Base):
"""
Generates order numbers and saves the last one.
**Attributes:**
last
The last stored/returned order number.
format
The format of the integer part of the order number.
"""
last = models.IntegerField(_(u"Last Order Number"), default=0)
format = models.CharField(_(u"Format"), blank=True, max_length=20)
def get_next(self, formatted=True):
"""Returns the next order number.
**Parameters:**
formatted
If True the number will be returned within the stored format.
"""
self.last += 1
self.save()
if formatted and self.format:
return self.format % self.last
else:
return self.last
| [
"kai.diefenbach@iqpp.de"
] | kai.diefenbach@iqpp.de |
d2921fe8f0ce8ec4fb95b0e4aae8a9b2d90db54d | 19380415ccdcb0dac20f7bd67fcc8a0f631a3b90 | /models/union-find.py | c1b9a401d367ca82e8e843172825c2ccacca6e0c | [
"MIT"
] | permissive | italo-batista/problems-solving | c06c811364db7439d842db76e743dd7a1a7c8365 | f83ad34f0abebd52925c4020635556f20743ba06 | refs/heads/master | 2021-10-28T07:01:21.643218 | 2019-04-22T15:27:19 | 2019-04-22T15:27:19 | 76,066,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # n : numero de nos
# q : numero de operacoes
n, q = map(int, raw_input().split())
parents = range(n+1)
sizes = [1] * (n+1)
def get_parent(x):
if x == parents[x]:
return parents[x]
else:
parents[x] = get_parent(parents[x])
return parents[x]
def same_set(x, y):
return get_parent(x) == get_parent(y)
def connect(x, y):
if not same_set(x, y):
parent_x = get_parent(x)
parent_y = get_parent(y)
if sizes[parent_x] > sizes[parent_y]:
parents[parent_y] = parent_x
sizes[parent_x] += sizes[parent_y]
else:
parents[parent_x] = parent_y
sizes[parent_y] += sizes[parent_x]
def get_size(x):
return sizes[get_parent(x)]
| [
"italo.batista@ccc.ufcg.edu.br"
] | italo.batista@ccc.ufcg.edu.br |
d13ae0b389a9aad24520238844270163decc9f47 | c2e06926e58e49e2659c77ec454716ccb42bd729 | /Test3/hawc2/2_postPro.py | 970866b6948734ae8d86a446cdf8f6be3f9fcab6 | [] | no_license | ptrbortolotti/BeamDyn_CpLambda | 72bfd6c831ebc5b86fdbc1f3dd10b3c05e693141 | e2f9a70044f7c0f1e720d828949faf1d392872c6 | refs/heads/main | 2023-08-28T22:34:21.546502 | 2021-09-27T17:34:01 | 2021-09-27T17:34:01 | 331,133,603 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,381 | py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Local
import weio
from welib.fast.fastlib import find_matching_pattern, averageDF
# --- Parameters
IPostPro=[0,1]
simDir='cases'
BD_mainfile = '../load_conv/f_1e5/Box_Beam_SCALED_1_BeamDyn.dat'
load = 500000;
vf = np.array([0.2,0.4,0.6,0.8,1.0,2.0,4.0])*load;
vf.sort()
# --- Derived params
bdLine = weio.read(BD_mainfile).toDataFrame()
kp_x = bdLine['kp_xr_[m]'].values
kp_y = bdLine['kp_yr_[m]'].values
# Hawc2 = BeamDyn
x = -kp_y
y = kp_x
z = bdLine['kp_zr_[m]'].values
nSpan=len(z)
if 0 in IPostPro:
# --- Loop on outputs and extract deflections
for isim, load in enumerate(vf):
outfilename = os.path.join(simDir,'f_{:5.1e}.dat'.format(load))
print(outfilename)
df = weio.read(outfilename).toDataFrame()
dfAvg = averageDF(df,avgMethod='constantwindow',avgParam=2.0)
colsX, sIdx = find_matching_pattern(df.columns, 'N(\d+)xb')
colsY, sIdx = find_matching_pattern(df.columns, 'N(\d+)yb')
colsZ, sIdx = find_matching_pattern(df.columns, 'N(\d+)zb')
Icol = [int(s) for s in sIdx]
if len(colsX)!=nSpan:
raise Exception('Number of columns dont match. Make this script more general or adapt')
u=np.zeros((3,nSpan))
for i,(cx,cy,cz,id) in enumerate(zip(colsX,colsY,colsZ,Icol)):
if i+1!=id:
raise Exception('Index mismatch, columns are not sorted')
u[:,i]=[dfAvg[cx]-x[i] ,dfAvg[cy]-y[i] ,dfAvg[cz]-z[i]]
fig,axes = plt.subplots(3, 1, sharex=True, figsize=(6.4,4.8)) # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
for i,(ax,sc) in enumerate(zip(axes.ravel(),['x','y','z'])):
ax.plot(z, u[i,:]*1000) #, label=r'$u_{}$'.format(sc))
ax.set_ylabel(r'$u_{}$ [mm]'.format(sc))
ax.tick_params(direction='in')
ax.set_xlabel('Span [m]')
#plt.show()
fig.savefig(outfilename.replace('.dat','.png'))
cols=['r_[m]','u_x_[m]','u_y_[m]','u_z_[m]']
data =np.column_stack((z,u.T))
dfOut = pd.DataFrame(columns=cols, data=data)
dfOut.to_csv(outfilename.replace('.dat','.csv'), index=False, sep=',')
if 1 in IPostPro:
# --- Loop on csv and extract tip deflections
utip=np.zeros((3,len(vf)))
for isim, load in enumerate(vf):
outfilename = os.path.join(simDir,'f_{:5.1e}.csv'.format(load))
df=weio.read(outfilename).toDataFrame()
utip[:,isim] = [df['u_x_[m]'].values[-1], df['u_y_[m]'].values[-1], df['u_z_[m]'].values[-1]]
cols=['f_[N]','u_x_[m]','u_y_[m]','u_z_[m]']
data =np.column_stack((vf,utip.T))
dfOut = pd.DataFrame(columns=cols, data=data)
dfOut.to_csv('tiploads3.csv', index=False, sep='\t')
fig,axes = plt.subplots(3, 1, sharex=True, figsize=(6.4,4.8)) # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
for i,(ax,sc) in enumerate(zip(axes.ravel(),['x','y','z'])):
ax.plot(np.arange(len(vf))+1, utip[i,:]*1000) #, label=r'$u_{}$'.format(sc))
ax.set_ylabel(r'$u_{}$ [mm]'.format(sc))
ax.tick_params(direction='in')
ax.set_xlabel('Load i')
fig.savefig('tiploads3.png')
if __name__ == '__main__':
pass
| [
"emmanuel.branlard@nrel.gov"
] | emmanuel.branlard@nrel.gov |
4978a0b12d379e159f7293cb2652dda29e4c98b6 | 3b76f9f2317e1eb2cd9553cab0b4dd01ce216ad5 | /using nested list find the second lower score using python.py | bbbf48ebc25de5392d90b5cabd6ffa32747cf7f2 | [] | no_license | KaziMotiour/Hackerrank-problem-solve-with-python | f12ea978c5274a90745545d3d2c9fb6a4f9b5230 | 798ce2a6c2b63ea24dc28a923bfee4b528fb2b5e | refs/heads/master | 2022-05-26T19:45:44.808451 | 2020-05-05T09:44:40 | 2020-05-05T09:44:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | arr=[]
n=int(input())
list=[[input(),float(input())] for _ in range(n)]
x=10000.0
a=0
i=0
j=1
while(n!=0):
if(list[i][j]<x ):
x=list[i][j]
i+=1
n-=1
a+=1
else:
i+=1
n-=1
a+=1
i=0
j=1
y=1000.0
p=0
for i in range(len(list)):
if(list[i][j]>x):
if(list[i][j]<y):
y=list[i][j]
z = list[i][j]
i+=1
a-=1
n+=1
else:
pass
else:
i+=1
a-=1
n+=1
i=0
j=1
for i in range(len(list)):
if list[i][j]==y:
arr.append(list[i][j-1])
i+=1
p+=1
else:
i+=1
a+=1
arr.sort()
for i in range(len(arr)):
print(arr[i])
| [
"kmatiour30@gmail.com"
] | kmatiour30@gmail.com |
b29bf0c42c3bd7b3ef74fd50f2d5c415917e4666 | 6dbf099660ee82b72fb2526a3dc242d99c5fb8c8 | /tests/standalone/PmwUsing.py | b763dd902383fb55b22258463d256f19f0b49337 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Hellebore/Nuitka | 3544af691bc352769858ec1d44b6e9de46087bcf | 252d8e5d24521f8fff38142aa66c6b9063151f57 | refs/heads/develop | 2021-01-06T15:33:49.111250 | 2020-02-18T14:24:49 | 2020-02-18T14:24:49 | 241,380,473 | 0 | 0 | Apache-2.0 | 2020-07-11T17:52:04 | 2020-02-18T14:21:01 | Python | UTF-8 | Python | false | false | 1,038 | py | # Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import Pmw
# nuitka-skip-unless-expression: __import__("Tkinter" if sys.version_info[0] < 3 else "tkinter")
# nuitka-skip-unless-imports: Pmw
| [
"kay.hayen@gmail.com"
] | kay.hayen@gmail.com |
2a8156a8a96ce1b6fba8ac57fb61505dc07461e3 | c90c88f662ca3f6294ae7d5b7adb04831a2c01d9 | /WalletCenter/alembic/env.py | df09303374a28e8df4b7f0812639d3baf82e3c2b | [] | no_license | BigJeffWang/blockchain-py | a41512fbd52b182306ea00607e6b93871d5aa04d | 9d2abf10e9ff5a4ed7203564026919c1e2bd088a | refs/heads/master | 2021-07-20T22:52:13.496892 | 2020-08-05T09:54:18 | 2020-08-05T09:54:18 | 203,220,932 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | from __future__ import with_statement
import sys
from pathlib import Path
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
sys.path.append(str(Path(__file__).resolve().parent.parent))
from tools.mysql_tool import MysqlTools
from models import base_model
from models import __alembic__
__alembic__.call_dynamic()
config = context.config
fileConfig(config.config_file_name)
connect_string = MysqlTools().get_connect_string()
config.set_main_option('sqlalchemy.url', connect_string)
target_metadata = base_model.BaseModel.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| [
"bigjeffwang@163.com"
] | bigjeffwang@163.com |
7949133cedd8e3d1b586ebe734d7d955c10c523e | 088ae8015799ea4cbe4fb0b9b247e9cda8a1a24c | /discovery-provider/src/eth_indexing/event_scanner.py | 49c506c4aa0cb8d42e4d7f166a72a8cadd4e431e | [
"Apache-2.0"
] | permissive | eteryko/audius-protocol | 3e0278dec32f978d9042fcb4ebce3463c8cbee64 | a17be23e32d80c393bc052ffe0e31a99f97bde42 | refs/heads/master | 2023-08-25T05:00:38.590767 | 2021-10-23T03:11:59 | 2021-10-23T03:11:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,818 | py | import datetime
import time
import logging
from typing import Tuple, Iterable, Union, Type, TypedDict, Any
from sqlalchemy import or_
from web3 import Web3
from web3.contract import Contract, ContractEvent
from web3.exceptions import BlockNotFound
from web3.types import BlockIdentifier
# Currently this method is not exposed over official web3 API,
# but we need it to construct eth_get_logs parameters
from web3._utils.filters import construct_event_filter_params
from web3._utils.events import get_event_data
from eth_abi.codec import ABICodec
from src.models.models import AssociatedWallet, EthBlock, User
from src.queries.get_balances import enqueue_immediate_balance_refresh
logger = logging.getLogger(__name__)
# version 2 added to reset existing last indexed eth block
eth_indexing_last_scanned_block_key = "eth_indexing_last_scanned_block_2"
# How many times we try to re-attempt a failed JSON-RPC call
MAX_REQUEST_RETRIES = 30
# Delay between failed requests to let JSON-RPC server to recover
REQUEST_RETRY_SECONDS = 3
# Minimum number of blocks to scan for our JSON-RPC throttling parameters
MIN_SCAN_CHUNK_SIZE = 10
# How many maximum blocks at the time we request from JSON-RPC
# and we are unlikely to exceed the response size limit of the JSON-RPC server
MAX_CHUNK_SCAN_SIZE = 10000
# Factor how was we increase chunk size if no results found
CHUNK_SIZE_INCREASE = 2
# initial number of blocks to scan, this number will increase/decrease as a function of whether transfer events have been found within the range of blocks scanned
START_CHUNK_SIZE = 20
# how many blocks from tail of chain we want to scan to
ETH_BLOCK_TAIL_OFFSET = 1
# the block number to start with if first time scanning
# this should be the first block during and after which $AUDIO transfer events started occurring
MIN_SCAN_START_BLOCK = 11103292
class TransferEvent(TypedDict):
logIndex: int
transactionHash: Any
blockNumber: int
args: Any
class EventScanner:
"""Scan blockchain for events and try not to abuse JSON-RPC API too much.
Can be used for real-time scans, as it detects minor chain reorganisation and rescans.
Unlike the easy web3.contract.Contract, this scanner can scan events from multiple contracts at once.
For example, you can get all transfers from all tokens in the same scan.
You *should* disable the default `http_retry_request_middleware` on your provider for Web3,
because it cannot correctly throttle and decrease the `eth_get_logs` block number range.
"""
def __init__(
self,
db,
redis,
web3: Web3,
contract: Type[Contract],
event_type: Type[ContractEvent],
filters: dict,
):
"""
:param db: database handle
:param redis: redis handle
:param web3: Web3 instantiated with provider url
:param contract: Contract
:param state: state manager to keep tracks of last scanned block and persisting events to db
:param event_type: web3 Event we scan
:param filters: Filters passed to get_logs e.g. { "address": <token-address> }
"""
self.logger = logger
self.db = db
self.redis = redis
self.contract = contract
self.web3 = web3
self.event_type = event_type
self.filters = filters
self.last_scanned_block = MIN_SCAN_START_BLOCK
self.latest_chain_block = self.web3.eth.blockNumber
def restore(self):
"""Restore the last scan state from redis.
If value not found in redis, restore from database."""
restored = self.redis.get(eth_indexing_last_scanned_block_key)
if not restored:
with self.db.scoped_session() as session:
result = session.query(EthBlock.last_scanned_block).first()
restored = result[0] if result else restored
self.last_scanned_block = int(restored) if restored else MIN_SCAN_START_BLOCK
logger.info(
f"event_scanner.py | Restored last scanned block ({self.last_scanned_block})"
)
def save(self, block_number: int):
"""Save at the end of each chunk of blocks, so we can resume in the case of a crash or CTRL+C
Next time the scanner is started we will resume from this block
"""
self.last_scanned_block = block_number
logger.info(
f"event_scanner.py | Saving last scanned block ({self.last_scanned_block}) to redis"
)
self.redis.set(
eth_indexing_last_scanned_block_key,
str(self.last_scanned_block),
)
with self.db.scoped_session() as session:
record = session.query(EthBlock).first()
if record:
record.last_scanned_block = self.last_scanned_block
else:
record = EthBlock(last_scanned_block=self.last_scanned_block)
session.add(record)
def get_block_timestamp(self, block_num) -> Union[datetime.datetime, None]:
"""Get Ethereum block timestamp"""
try:
block_info = self.web3.eth.getBlock(block_num)
except BlockNotFound:
# Block was not mined yet,
# minor chain reorganisation?
return None
last_time = block_info["timestamp"]
return datetime.datetime.utcfromtimestamp(last_time)
def get_suggested_scan_end_block(self):
"""Get the last mined block on Ethereum chain we are following."""
# Do not scan all the way to the final block, as this
# block might not be mined yet
return self.latest_chain_block - ETH_BLOCK_TAIL_OFFSET
def get_last_scanned_block(self) -> int:
"""The number of the last block we have stored."""
return self.last_scanned_block
def process_event(
self, block_timestamp: datetime.datetime, event: TransferEvent
) -> str:
"""Record a ERC-20 transfer in our database."""
# Events are keyed by their transaction hash and log index
# One transaction may contain multiple events
# and each one of those gets their own log index
log_index = event["logIndex"] # Log index within the block
# transaction_index = event.transactionIndex # Transaction index within the block
txhash = event["transactionHash"].hex() # Transaction hash
block_number = event["blockNumber"]
# Convert ERC-20 Transfer event to our internal format
args = event["args"]
transfer = {
"from": args["from"],
"to": args["to"],
"value": args["value"],
"timestamp": block_timestamp,
}
# Add user ids from the transfer event into the balance refresh queue.
# Depending on the wallet connection, we may have the address stored as
# lower cased, so to be safe, we refresh check-summed and lower-cased adddresses.
transfer_event_wallets = [
transfer["from"],
transfer["to"],
transfer["from"].lower(),
transfer["to"].lower(),
]
with self.db.scoped_session() as session:
user_result = (
session.query(User.user_id)
.filter(User.is_current == True)
.filter(User.wallet.in_(transfer_event_wallets))
).all()
user_set = {user_id for [user_id] in user_result}
associated_wallet_result = (
session.query(AssociatedWallet.user_id)
.filter(AssociatedWallet.is_current == True)
.filter(AssociatedWallet.is_delete == False)
.filter(AssociatedWallet.wallet.in_(transfer_event_wallets))
).all()
associated_wallet_set = {user_id for [user_id] in associated_wallet_result}
user_ids = list(user_set.union(associated_wallet_set))
if user_ids:
logger.info(
f"event_scanner.py | Enqueueing user ids {user_ids} to immediate balance refresh queue"
)
enqueue_immediate_balance_refresh(self.redis, user_ids)
# Return a pointer that allows us to look up this event later if needed
return f"{block_number}-{txhash}-{log_index}"
def scan_chunk(self, start_block, end_block) -> Tuple[int, list]:
"""Read and process events between to block numbers.
Dynamically decrease the size of the chunk in case the JSON-RPC server pukes out.
:return: tuple(actual end block number, when this block was mined, processed events)
"""
block_timestamps = {}
get_block_timestamp = self.get_block_timestamp
# Cache block timestamps to reduce some RPC overhead
# Real solution might include smarter models around block
def get_block_mined_timestamp(block_num):
if block_num not in block_timestamps:
block_timestamps[block_num] = get_block_timestamp(block_num)
return block_timestamps[block_num]
all_processed = []
# Callable that takes care of the underlying web3 call
def _fetch_events(from_block, to_block):
return _fetch_events_for_all_contracts(
self.web3,
self.event_type,
self.filters,
from_block=from_block,
to_block=to_block,
)
# Do `n` retries on `eth_get_logs`,
# throttle down block range if needed
end_block, events = _retry_web3_call(
_fetch_events, start_block=start_block, end_block=end_block
)
for evt in events:
idx = evt[
"logIndex"
] # Integer of the log index position in the block, null when its pending
# We cannot avoid minor chain reorganisations, but
# at least we must avoid blocks that are not mined yet
assert idx is not None, "Somehow tried to scan a pending block"
block_number = evt["blockNumber"]
# Get UTC time when this event happened (block mined timestamp)
# from our in-memory cache
block_timestamp = get_block_mined_timestamp(block_number)
logger.debug(
f'event_scanner.py | Processing event {evt["event"]}, block:{evt["blockNumber"]}'
)
processed = self.process_event(block_timestamp, evt)
all_processed.append(processed)
return end_block, all_processed
def estimate_next_chunk_size(self, current_chuck_size: int, event_found_count: int):
"""Try to figure out optimal chunk size
Our scanner might need to scan the whole blockchain for all events
* We want to minimize API calls over empty blocks
* We want to make sure that one scan chunk does not try to process too many entries once, as we try to control commit buffer size and potentially asynchronous busy loop
* Do not overload node serving JSON-RPC API by asking data for too many events at a time
Currently Ethereum JSON-API does not have an API to tell when a first event occured in a blockchain
and our heuristics try to accelerate block fetching (chunk size) until we see the first event.
These heurestics exponentially increase the scan chunk size depending on if we are seeing events or not.
When any transfers are encountered, we are back to scanning only a few blocks at a time.
It does not make sense to do a full chain scan starting from block 1, doing one JSON-RPC call per 20 blocks.
"""
if event_found_count > 0:
# When we encounter first events, reset the chunk size window
current_chuck_size = MIN_SCAN_CHUNK_SIZE
else:
current_chuck_size *= CHUNK_SIZE_INCREASE
current_chuck_size = max(MIN_SCAN_CHUNK_SIZE, current_chuck_size)
current_chuck_size = min(MAX_CHUNK_SCAN_SIZE, current_chuck_size)
return int(current_chuck_size)
def scan(
self,
start_block,
end_block,
start_chunk_size=START_CHUNK_SIZE,
) -> Tuple[list, int]:
"""Perform a token events scan.
:param start_block: The first block included in the scan
:param end_block: The last block included in the scan
:param start_chunk_size: How many blocks we try to fetch over JSON-RPC on the first attempt
:return: [All processed events, number of chunks used]
"""
current_block = start_block
# Scan in chunks, commit between
chunk_size = start_chunk_size
last_scan_duration = last_logs_found = 0
total_chunks_scanned = 0
# All processed entries we got on this scan cycle
all_processed = []
while current_block <= end_block:
# Print some diagnostics to logs to try to fiddle with real world JSON-RPC API performance
estimated_end_block = min(
current_block + chunk_size, self.get_suggested_scan_end_block()
)
logger.debug(
"event_scanner.py | Scanning token transfers for blocks: %d - %d, chunk size %d, last chunk scan took %f, last logs found %d",
current_block,
estimated_end_block,
chunk_size,
last_scan_duration,
last_logs_found,
)
start = time.time()
actual_end_block, new_entries = self.scan_chunk(
current_block, estimated_end_block
)
# Where does our current chunk scan ends - are we out of chain yet?
current_end = actual_end_block
last_scan_duration = int(time.time() - start)
all_processed += new_entries
# Try to guess how many blocks to fetch over `eth_get_logs` API next time
chunk_size = self.estimate_next_chunk_size(chunk_size, len(new_entries))
# Set where the next chunk starts
current_block = current_end + 1
total_chunks_scanned += 1
self.save(min(current_end, self.get_suggested_scan_end_block()))
return all_processed, total_chunks_scanned
def _retry_web3_call( # type: ignore
func,
start_block,
end_block,
retries=MAX_REQUEST_RETRIES,
delay=REQUEST_RETRY_SECONDS,
) -> Tuple[int, list]: # type: ignore
"""A custom retry loop to throttle down block range.
If our JSON-RPC server cannot serve all incoming `eth_get_logs` in a single request,
we retry and throttle down block range for every retry.
For example, Go Ethereum does not indicate what is an acceptable response size.
It just fails on the server-side with a "context was cancelled" warning.
:param func: A callable that triggers Ethereum JSON-RPC, as func(start_block, end_block)
:param start_block: The initial start block of the block range
:param end_block: The initial start block of the block range
:param retries: How many times we retry
:param delay: Time to sleep between retries
"""
for i in range(retries):
try:
return end_block, func(start_block, end_block)
except Exception as e:
# Assume this is HTTPConnectionPool(host='localhost', port=8545): Read timed out. (read timeout=10)
# from Go Ethereum. This translates to the error "context was cancelled" on the server side:
# https://github.com/ethereum/go-ethereum/issues/20426
if i < retries - 1:
# Give some more verbose info than the default middleware
logger.warning(
"event_scanner.py | Retrying events for block range %d - %d (%d) failed with %s, retrying in %s seconds",
start_block,
end_block,
end_block - start_block,
e,
delay,
)
# Decrease the `eth_get_blocks` range
end_block = start_block + ((end_block - start_block) // 2)
# Let the JSON-RPC to recover e.g. from restart
time.sleep(delay)
continue
else:
logger.warning("event_scanner.py | Out of retries")
raise
def _fetch_events_for_all_contracts(
web3,
event_type,
argument_filters: dict,
from_block: BlockIdentifier,
to_block: BlockIdentifier,
) -> Iterable:
"""Get events using eth_get_logs API.
This method is detached from any contract instance.
This is a stateless method, as opposed to createFilter.
It can be safely called against nodes which do not provide `eth_newFilter` API, like Infura.
"""
if from_block is None:
raise TypeError("Missing mandatory keyword argument to get_logs: fromBlock")
# Currently no way to poke this using a public Web3.py API.
# This will return raw underlying ABI JSON object for the event
abi = event_type._get_event_abi()
# Depending on the Solidity version used to compile
# the contract that uses the ABI,
# it might have Solidity ABI encoding v1 or v2.
# We just assume the default that you set on Web3 object here.
# More information here https://eth-abi.readthedocs.io/en/latest/index.html
codec: ABICodec = web3.codec
# Here we need to poke a bit into Web3 internals, as this
# functionality is not exposed by default.
# Construct JSON-RPC raw filter presentation based on human readable Python descriptions
# Namely, convert event names to their keccak signatures
# More information here:
# https://github.com/ethereum/web3.py/blob/e176ce0793dafdd0573acc8d4b76425b6eb604ca/web3/_utils/filters.py#L71
_, event_filter_params = construct_event_filter_params(
abi,
codec,
address=argument_filters.get("address"),
argument_filters=argument_filters,
fromBlock=from_block,
toBlock=to_block,
)
logger.debug(
"event_scanner.py | Querying eth_get_logs with the following parameters: %s",
event_filter_params,
)
# Call JSON-RPC API on your Ethereum node.
# get_logs() returns raw AttributedDict entries
logs = web3.eth.getLogs(event_filter_params)
# Convert raw binary data to Python proxy objects as described by ABI
all_events = []
for log in logs:
# Convert raw JSON-RPC log result to human readable event by using ABI data
# More information how processLog works here
# https://github.com/ethereum/web3.py/blob/fbaf1ad11b0c7fac09ba34baff2c256cffe0a148/web3/_utils/events.py#L200
event = get_event_data(codec, abi, log)
all_events.append(event)
return all_events
| [
"noreply@github.com"
] | eteryko.noreply@github.com |
070c87fe7edf4ced900434ea1aa417de27ed5dc7 | 8d976929239774fd21ab6d8cb55623c2d34dcf70 | /mnist/keras_func_replica_imagenet_inception.py | bed214d2f5e6d1d79781b3ccba1c44b22609087a | [] | no_license | ilyaperepelitsa/kaggle | 776c07c4643458b020bb18b998a6b79c77525c9a | eecf546079e70ab20c6ed8e19012e4ae67e16131 | refs/heads/master | 2021-01-09T05:47:29.192790 | 2019-02-09T21:02:08 | 2019-02-09T21:02:08 | 80,834,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,092 | py | import tensorflow as tf
import numpy as np
import tensorflow as tf
import json
import os
import pandas as pd
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import Dropout, Flatten, Input, Concatenate
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint
import keras
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import itertools
from sklearn.metrics import confusion_matrix
from keras import backend as K
from keras.callbacks import TensorBoard
ROOT_PATH = os.path.join(os.path.dirname(os.path.abspath("__file__")), "mnist")
JSON_PATH = os.path.join(os.path.dirname(os.path.abspath("__file__")),"mnist", "tf_specs.json")
LOGS_PATH = os.path.join(ROOT_PATH, "logs")
SUMMARY_PATH = os.path.join(ROOT_PATH, "summary")
METRICS_PATH = os.path.join(ROOT_PATH, "metrics.json")
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def check_dir_create(path):
if os.path.exists(path):
pass
else:
os.mkdir(path)
check_dir_create(LOGS_PATH)
hyper_params = json.load(open(JSON_PATH))
working_params = hyper_params["keras_func_replica_imagenet_inception"]
LOGS_PATH = os.path.join(LOGS_PATH, working_params["logs"])
FINAL_LOGS = os.path.join(LOGS_PATH, "weights.best.hdf5")
EPOCHS = working_params["num_steps"]
RATE = working_params["dropout_keep_prob"]
BATCH_SIZE = working_params["minibatch_size"]
FLAT_SIZE = working_params["flat_size"]
LEARNING_RATE = working_params["learning_rate"]
# NOTE(review): this redefines check_dir_create declared earlier in the file;
# the duplicate is kept so applying this change alone stays coherent, but one
# of the two copies should eventually be deleted.
def check_dir_create(path):
    """Create `path` (including missing parents) if it does not exist."""
    # Race-free replacement for the original exists()-then-mkdir pattern.
    os.makedirs(path, exist_ok=True)
check_dir_create(LOGS_PATH)
def get_batch(X, y, size):
    """Draw a random minibatch of `size` rows from X with the matching rows of y.

    Index labels are sampled without replacement; the selected rows come back
    in their original index order, not in the order they were sampled.
    """
    chosen = np.random.choice(X.index, size, replace = False)
    batch_x = X.loc[X.index.isin(chosen)]
    batch_y = y.loc[y.index.isin(chosen)]
    return batch_x, batch_y
# Load the labeled training set and the unlabeled submission set (Kaggle
# MNIST CSV layout: "label" column plus 784 pixel columns).
full_data = pd.read_csv(
    os.path.join(
        os.path.dirname(
            os.path.abspath("__file__")
        ),'mnist/data/train.csv'
    ))
target_x = pd.read_csv(
    os.path.join(
        os.path.dirname(
            os.path.abspath("__file__")
        ),'mnist/data/test.csv'
    ))
# NOTE(review): the next expression has no effect (result discarded);
# the actual re-indexing happens on the following line.
target_x.index+1
target_x = target_x.set_index(target_x.index + 1)
# NOTE(review): no-op expression, likely a leftover notebook inspection.
target_x.values.shape
# Split pixel features from the label column.
x = full_data[full_data.columns[full_data.columns!="label"]]
y = pd.DataFrame(full_data["label"])
# no_classes = y.label.unique().shape[0]
# y = y.values
# y
# One-hot encode the digit labels.
encoder = OneHotEncoder()
encoder.fit(y)
y = pd.DataFrame(encoder.transform(y).toarray())
# NOTE(review): OneHotEncoder.active_features_ was removed in scikit-learn
# >= 0.23 (use categories_ instead); this only runs on old sklearn versions.
labels = encoder.active_features_
y
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# x_train.values.reshape(x_train.shape[0], 28, 28, 1).shape
#
# x_train = x_train.values.reshape(-1, 28, 28, 1)
# x_test = x_test.values.reshape(-1, 28, 28, 1)
#
# y_train = y_train.values
# y_test = y_test.values
#
# input_shape = (28, 28, 1)
# Reshape flat 784-pixel rows into 28x28 images, with the channel axis
# positioned according to the active Keras backend convention.
if K.image_data_format() == 'channels_first': # Theano backend
    x_train = x_train.values.reshape(x_train.shape[0], 1, 28, 28)
    x_test = x_test.values.reshape(x_test.shape[0], 1, 28, 28)
    input_shape = (1, 28, 28)
else: # Tensorflow backend
    x_train = x_train.values.reshape(x_train.shape[0], 28, 28, 1)
    x_test = x_test.values.reshape(x_test.shape[0], 28, 28, 1)
    input_shape = (28, 28, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
input_size = x.shape[1]
# input_size
no_classes = len(labels)
# Drop any stale graph/session state before building the model.
K.clear_session()
# The network is three parallel branches over the same input, merged before
# the softmax classifier:
#   branch 1 - stacked convs with MAX pooling
#   branch 2 - identical topology with AVERAGE pooling
#   branch 3 - four chained inception-style modules
reshaped_input = Input(shape = input_shape)
# --- Branch 1: 1x1/3x3 convs, then 3x3/5x5/7x7 stacks, max-pooled ---
conv_1_1 = Conv2D(filters = 32, kernel_size = (1, 1),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(reshaped_input)
conv_1_2 = Conv2D(filters = 32, kernel_size = (3,3),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_1_1)
pool_1_1 = MaxPooling2D(pool_size=(2,2))(conv_1_2)
conv_1_3 = Conv2D(filters = 64, kernel_size = (3, 3),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(pool_1_1)
conv_1_4 = Conv2D(filters = 64, kernel_size = (5,5),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_1_3)
conv_1_5 = Conv2D(filters = 64, kernel_size = (7,7),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_1_4)
pool_1_2 = MaxPooling2D(pool_size=(2,2))(conv_1_5)
conv_1_6 = Conv2D(filters = 128, kernel_size = (3, 3),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(pool_1_2)
conv_1_7 = Conv2D(filters = 128, kernel_size = (5,5),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_1_6)
conv_1_8 = Conv2D(filters = 128, kernel_size = (7,7),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_1_7)
pool_1_3 = MaxPooling2D(pool_size=(2,2))(conv_1_8)
drop_1_1 = Dropout(rate = 0.2)(pool_1_3)
flatten_1 = Flatten()(drop_1_1)
drop_1_2 = Dropout(rate = 0.2)(flatten_1)
output_1 = Dense(FLAT_SIZE, activation = "relu",
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(drop_1_2)
# --- Branch 2: same layer stack as branch 1 but with average pooling ---
conv_2_1 = Conv2D(filters = 32, kernel_size = (1, 1),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(reshaped_input)
conv_2_2 = Conv2D(filters = 32, kernel_size = (3,3),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_2_1)
pool_2_1 = AveragePooling2D(pool_size=(2,2))(conv_2_2)
conv_2_3 = Conv2D(filters = 64, kernel_size = (3, 3),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(pool_2_1)
conv_2_4 = Conv2D(filters = 64, kernel_size = (5,5),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_2_3)
conv_2_5 = Conv2D(filters = 64, kernel_size = (7,7),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_2_4)
pool_2_2 = AveragePooling2D(pool_size=(2,2))(conv_2_5)
conv_2_6 = Conv2D(filters = 128, kernel_size = (3, 3),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(pool_2_2)
conv_2_7 = Conv2D(filters = 128, kernel_size = (5,5),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_2_6)
conv_2_8 = Conv2D(filters = 128, kernel_size = (7, 7),padding = 'Same',
                 activation ='relu',
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(conv_2_7)
pool_2_3 = AveragePooling2D(pool_size=(2,2))(conv_2_8)
drop_2_1 = Dropout(rate = 0.2)(pool_2_3)
flatten_2 = Flatten()(drop_2_1)
drop_2_2 = Dropout(rate = 0.2)(flatten_2)
output_2 = Dense(FLAT_SIZE, activation = "relu",
                 kernel_initializer = "truncated_normal",
                 bias_initializer = "zeros")(drop_2_2)
# --- Branch 3: four chained inception-style modules. Each module runs
# 1x1, 1x1->3x3, 1x1->5x5 and 3x3-maxpool->1x1 towers in parallel and
# concatenates them along the channel axis. ---
tower_0 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                 kernel_initializer = "truncated_normal")(reshaped_input)
tower_1 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                 kernel_initializer = "truncated_normal")(reshaped_input)
tower_1 = Conv2D(64, (3,3), padding='same', activation='relu', use_bias=False,
                 kernel_initializer = "truncated_normal")(tower_1)
tower_2 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                 kernel_initializer = "truncated_normal")(reshaped_input)
tower_2 = Conv2D(64, (5,5), padding='same', activation='relu', use_bias=False,
                 kernel_initializer = "truncated_normal")(tower_2)
tower_3 = MaxPooling2D((3,3), strides=(1,1), padding='same')(reshaped_input)
tower_3 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                 kernel_initializer = "truncated_normal")(tower_3)
output_inception = keras.layers.concatenate([tower_0, tower_1, tower_2, tower_3], axis = 3)
# Module 2.
tower_0_1 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(output_inception)
tower_1_1 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(output_inception)
tower_1_1 = Conv2D(64, (3,3), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(tower_1_1)
tower_2_1 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(output_inception)
tower_2_1 = Conv2D(64, (5,5), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(tower_2_1)
tower_3_1 = MaxPooling2D((3,3), strides=(1,1), padding='same')(output_inception)
tower_3_1 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(tower_3_1)
output_inception_1 = keras.layers.concatenate([tower_0_1, tower_1_1, tower_2_1, tower_3_1], axis = 3)
# Module 3.
tower_0_2 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(output_inception_1)
tower_1_2 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(output_inception_1)
tower_1_2 = Conv2D(64, (3,3), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(tower_1_2)
tower_2_2 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(output_inception_1)
tower_2_2 = Conv2D(64, (5,5), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(tower_2_2)
tower_3_2 = MaxPooling2D((3,3), strides=(1,1), padding='same')(output_inception_1)
tower_3_2 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(tower_3_2)
output_inception_2 = keras.layers.concatenate([tower_0_2, tower_1_2, tower_2_2, tower_3_2], axis = 3)
# Module 4.
tower_0_3 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(output_inception_2)
tower_1_3 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(output_inception_2)
tower_1_3 = Conv2D(64, (3,3), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(tower_1_3)
tower_2_3 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(output_inception_2)
tower_2_3 = Conv2D(64, (5,5), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(tower_2_3)
tower_3_3 = MaxPooling2D((3,3), strides=(1,1), padding='same')(output_inception_2)
tower_3_3 = Conv2D(64, (1,1), padding='same', activation='relu', use_bias=False,
                   kernel_initializer = "truncated_normal")(tower_3_3)
output_inception_3 = keras.layers.concatenate([tower_0_3, tower_1_3, tower_2_3, tower_3_3], axis = 3)
flatten_inception = Flatten()(output_inception_3)
output_inception_final = Dense(FLAT_SIZE, activation = "relu",
                               kernel_initializer = "truncated_normal",
                               bias_initializer = "zeros")(flatten_inception)
# --- Merge the three branches and classify with softmax ---
concat_layer = Concatenate(axis = -1)([output_1, output_2, output_inception_final])
drop = Dropout(rate = RATE)(concat_layer)
output = Dense(units = no_classes, activation = "softmax",
               kernel_initializer = "truncated_normal",
               bias_initializer = "truncated_normal")(drop)
### OUTPUT
model = Model(inputs = reshaped_input, outputs = output)
# Warm-start from the best checkpoint if one exists; a fresh run simply
# proceeds with freshly initialized weights.
try:
    model.load_weights(FINAL_LOGS)
except OSError:
    pass
model.compile(loss = keras.losses.categorical_crossentropy,
              optimizer = keras.optimizers.Adam(0.001),
              metrics = ['accuracy'])
# Persist only the weights that improve validation accuracy.
checkpoint = ModelCheckpoint(FINAL_LOGS, monitor='val_acc', verbose=0, save_best_only=True, mode='max')
tensorbard = TensorBoard(log_dir=os.path.join(LOGS_PATH, "board"), histogram_freq=100,
                         write_graph=True, write_images=True)
# tbCallback.set_model(model)
callbacks_list = [checkpoint, tensorbard]
# model.fit(x = x_train, y = y_train, batch_size = BATCH_SIZE, epochs = EPOCHS, validation_data = (x_test, y_test), verbose = 2)
# learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
#                                             patience=3,
#                                             verbose=1,
#                                             factor=0.5,
#                                             min_lr=0.0001)
# Light augmentation: rescale to [0, 1] plus small random rotations only
# (flips stay off because digits are not flip-invariant).
datagen = ImageDataGenerator(
    rescale=1./255,
    featurewise_center=False, # set input mean to 0 over the dataset
    samplewise_center=False, # set each sample mean to 0
    featurewise_std_normalization=False, # divide inputs by std of the dataset
    samplewise_std_normalization=False, # divide each input by its std
    # zca_whitening=True, # apply ZCA whitening
    rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)
    # zoom_range = 0.2, # Randomly zoom image
    # shear_range = 0.2,
    # width_shift_range=0.3, # randomly shift images horizontally (fraction of total width)
    # height_shift_range=0.3, # randomly shift images vertically (fraction of total height)
    horizontal_flip=False, # randomly flip images
    vertical_flip=False) # randomly flip images
datagen.fit(x_train)
# h = model.fit_generator(datagen.flow(x_train,y_train, batch_size=BATCH_SIZE),
#                         epochs = EPOCHS, validation_data = (x_test,y_test),
#                         verbose = 2, steps_per_epoch=x_train.shape[0] // BATCH_SIZE
#                         , callbacks=[learning_rate_reduction],)
#
h = model.fit_generator(datagen.flow(x_train,y_train,
                        batch_size=BATCH_SIZE,
                        shuffle=True,
                        # save_to_dir = LOGS_PATH,
                        save_to_dir = None),
                        epochs = EPOCHS, validation_data = (x_test,y_test),
                        verbose = 1,
                        callbacks=callbacks_list)
# Evaluate on the held-out split and plot the confusion matrix.
y_pred = model.predict(x_test)
Y_pred_classes = np.argmax(y_pred, axis = 1)
Y_true = np.argmax(y_test.values, axis = 1)
# Y_true
# Y_true
# Y_pred_classes
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
plot_confusion_matrix(confusion_mtx, classes = range(10))
# NOTE(review): this evaluates on x_test/y_test (the validation split),
# but the printed labels say "Train data" — one of the two is wrong.
train_loss, train_accuracy = model.evaluate(x_test, y_test, verbose = 0)
print("Train data loss: ", train_loss)
print("Train data accuracy: ", train_accuracy)
| [
"ilyaperepelitsa@gmail.com"
] | ilyaperepelitsa@gmail.com |
8d0b460bb292a9b4de4fb6d227c3d54b4b4daa8f | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc036/A/4928401.py | 7ff86494dc8fdbfefcd617e792b589f929e8cd7a | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | A, B = map(int, input().split())
print(B//A+(B%A!=0)) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
229d34fcec99565b3f6d19af0cbe8c1e7108dde1 | ecb22ddf7a927d320d2447feddf970c6ed81adbe | /src/plotAnswerLengthDistribution.py | f6a1c47810f11104d60e0195c7f698393074b053 | [] | no_license | shiannn/ADL2020-HW2-BertForQA | b1733339703dffb2fbdda481a5f090c26182c4a4 | 9e4f38bdeaaf61bd2c08ddd163271a699f21f16e | refs/heads/master | 2022-12-17T00:02:48.287380 | 2020-09-27T08:26:47 | 2020-09-27T08:26:47 | 257,008,637 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | import sys
import json
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from transformers import BertTokenizer
if __name__ == '__main__':
if len(sys.argv) != 3:
print('usage: python3 plotAnswerLengthDistribution.py dataName saveName')
exit(0)
dataName = sys.argv[1]
saveName = sys.argv[2]
ansLengthFile = Path('ansLength.npy')
print(ansLengthFile.exists())
if(not ansLengthFile.exists()):
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
distributionList = []
with open(dataName, 'r') as f:
A = json.load(f)
#view all answer
for data in A['data']:
#print(data['paragraphs'])
for paragraph in data['paragraphs']:
for qas in paragraph['qas']:
for ans in qas['answers']:
#temp = ('#' in ans['text'])
#if temp == True:
# print('a')
print(ans)
ansTokens = tokenizer.tokenize(ans['text'])
print(ansTokens)
distributionList.append(len(ansTokens))
np.save('ansLength', np.array(distributionList))
ansLength = np.load(ansLengthFile)
print(len(ansLength))
bins = np.arange(0,120+5, step=5)
print(bins)
plt.hist(ansLength, bins=bins, edgecolor='black', cumulative=True, density=True)
plt.xlabel('Length')
plt.ylabel('Count (%)')
plt.title('Cumulative Answer Length')
plt.savefig(saveName/Path('length.png')) | [
"b05502087@ntu.edu.tw"
] | b05502087@ntu.edu.tw |
cbfa5d9b795b084ed6548df8174d6450302ffb67 | d4bb21370ab020aa9d1dad2d812cdd0f25722ed4 | /test/support/git_fixture.py | c1102e4132706de3b9ce846406dfe120dc7f1820 | [
"MIT"
] | permissive | richo/groundstation | e6b74fb0a428b26408feae06ce16ad98997f2709 | 7ed48dd355051ee6b71164fc801e3893c09d11db | refs/heads/master | 2023-07-21T16:20:17.244184 | 2018-12-09T22:39:41 | 2018-12-09T22:39:41 | 7,293,510 | 27 | 5 | MIT | 2023-07-06T21:04:24 | 2012-12-23T09:09:49 | Python | UTF-8 | Python | false | false | 372 | py | def fake_tree():
return """100644 blob fadc864ddfed4a93fabf6d23939db4d542eb4363
.gitignore100644 blob 48e87b133a2594371acd57c49339dc8c04d55146 .gitmodules
100644 blob 725455bca81c809ad55aac363c633988f9207620 .jshintignore
100644 blob 40928639c7903f83f26e1aed78401ffde587e437 .jshintrc
100644 blob f3a9c9a807be340a7b929557aea3088540c77a6c .rbenv-version"""
| [
"richo@psych0tik.net"
] | richo@psych0tik.net |
b15bbefa83db24057d7e6d691d828073a55b7df7 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/leveleditor/SignEditFrame.py | 2250ee50af5616189a902e0af0626baa8271b90d | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 15,501 | py | """
Toontown Sign Edit Frame
"""
import wx
from LevelStyleManager import *
from wx.lib.agw.knobctrl import *
class ToonKnobCtrl(KnobCtrl):
    """Knob control that accumulates total rotation across full turns.

    Unlike the stock KnobCtrl (whose value wraps at the angular range),
    this keeps a running total `_totalVal` so multiple revolutions add up,
    scaled down by `_scale` when reported to listeners.
    """
    def __init__(self, parent, scale=1, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize):
        KnobCtrl.__init__(self, parent, id, pos, size)
        # Accumulated rotation (in scaled units) and last raw angle seen.
        self._totalVal = 0.0
        self._oldVal = 0.0
        self._scale = scale
    def SetTotal(self, totalVal):
        # Seed the accumulator from an externally supplied (unscaled) value.
        self._totalVal = totalVal*self._scale
    def SetTrackPosition(self):
        """ Used internally. """
        width, height = self.GetSize()
        x = self._mousePosition.x
        y = self._mousePosition.y
        ang = self.GetAngleFromCoord(x, y)
        val = ang*180.0/math.pi  # current pointer angle in degrees
        deltarange = self._maxvalue - self._minvalue
        deltaangle = self._angleend - self._anglestart
        coeff = float(deltaangle)/float(deltarange)
        # Map the raw angle into the knob's configured value range.
        if self._anglestart < 0 and val >= 360.0 + self._anglestart:
            scaledval = (val - (360.0 + self._anglestart))/coeff
        else:
            scaledval = (val - self._anglestart)/coeff
        # Accumulate the angular delta; a jump of >= 180 degrees means the
        # pointer crossed the 0/360 seam, so compensate by a full turn.
        diff = val - self._oldVal
        absdiff = abs(diff)
        if absdiff >= 0.0 and absdiff < 180.0:
            self._totalVal = self._totalVal + diff
        elif absdiff >= 180.0 and absdiff < 360.0:
            if 360.0 > 360.0 - diff:
                self._totalVal = self._totalVal - (360.0 - diff)
            else:
                self._totalVal = self._totalVal + (360.0 + diff)
        # Fire the CHANGING event with the (descaled) running total; a
        # handled event (no Skip) vetoes the update.
        event = KnobCtrlEvent(wxKC_EVENT_ANGLE_CHANGING, self.GetId())
        event.SetEventObject(self)
        event.SetOldValue(self.GetValue())
        event.SetValue(round(self._totalVal/self._scale, 2))
        if self.GetEventHandler().ProcessEvent(event):
            # the caller didn't use event.Skip()
            return
        self.SetValue(scaledval)
        # Re-use the same event object to announce the committed change.
        event.SetEventType(wxKC_EVENT_ANGLE_CHANGED)
        event.SetOldValue(scaledval)
        self.GetEventHandler().ProcessEvent(event)
        self._old_ang = ang
        self._oldVal = val
class ToonSignTextCtrlValidator(wx.PyValidator):
    """Validator that accepts only text parseable as a float.

    In the original code the `float(text)` call lived inside a
    commented-out debug print, so `Validate` could never raise and
    always returned True; the conversion is now performed explicitly.
    """
    def __init__(self):
        wx.PyValidator.__init__(self)
    def Clone(self):
        # wx requires validators to be clonable; state-free, so a new
        # instance suffices.
        return ToonSignTextCtrlValidator()
    def TransferToWindow(self):
        return True
    def TransferFromWindow(self):
        return True
    def Validate(self, win):
        textCtrl = self.GetWindow()
        text = textCtrl.GetValue()
        try:
            float(text)  # raises ValueError for non-numeric input
            return True
        except ValueError:
            return False
class ToonSignTextCtrl(wx.TextCtrl):
    """Numeric text field that pushes its value into the sign editor.

    `parent` is the SignEditFrame (not the wx parent window); every valid
    edit is forwarded to the frame's WritePandaValue and mirrored into the
    shared knob control.
    """
    def __init__(self, parent, id=-1, value=wx.EmptyString, pos=wx.DefaultPosition):
        # The actual wx parent is the frame's panel.
        wx.TextCtrl.__init__(self, parent.panel, id, pos=pos, value=value, validator=ToonSignTextCtrlValidator())
        self.parent = parent
        self.Bind(wx.EVT_TEXT, self.OnText)
        self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
    def OnText(self, event):
        #print "Got %s" %(self.GetValue())
        try:
            val = float(self.GetValue())
            self.parent.WritePandaValue(self, val)
            self.parent.knobCtrl.SetTotal(val)
        except ValueError:
            #print "Clearing..."
            # Non-numeric intermediate input (e.g. "-"): ignore until valid.
            i=0
    def OnSetFocus(self, event):
        #print "Setting total %d" %(float(self.GetValue()))
        # Sync the knob accumulator to this field so turning the knob
        # continues from the field's current value.
        self.parent.knobCtrl.SetTotal(float(self.GetValue()))
class SignEditFrame(wx.MiniFrame):
    """Mini-frame for editing a Toontown sign's text baseline.

    Every widget change is written straight into `baselineDNA` and made
    visible by calling `objNP.replace()`. The original text and style are
    captured at construction so "Revert All" can restore them.

    Fix: the "Stomp" label read "Stopm" (user-visible typo).
    """
    def __init__(self, parent, editor, baselineDNA, objNP, hasGraphics=False):
        wx.MiniFrame.__init__(self, parent, -1, 'Sign Text', size=(400, 500), style=wx.DEFAULT_FRAME_STYLE|wx.FRAME_FLOAT_ON_PARENT)
        self.panel = wx.Panel(self, -1, size=(400, 500))
        self.editor = editor
        self.baselineDNA = baselineDNA
        # Snapshot the original text and style for Revert All.
        self.txtOrig = DNAGetBaselineString(self.baselineDNA)
        self.baselineStyleOrig = DNABaselineStyle()
        self.baselineStyleOrig.copy(self.baselineDNA)
        self.objNP = objNP
        self.scale = 100  # knob sensitivity: knob degrees per unit value
        self.hasGraphics = hasGraphics
        self.signTxtStatic = wx.StaticText(self.panel, -1, "Caption", pos=(15, 15))
        self.signTxt = wx.TextCtrl(self.panel, -1, "", pos=(60, 15), size=(270, 20))
        fontChoices = self.editor.styleManager.getCatalogCodes('font')
        self.fontStatic = wx.StaticText(self.panel, -1, "Font", pos=(15, 45))
        self.fontChoice = wx.ComboBox(self.panel, -1, "", pos=(60, 45), size=(100, 20), choices=fontChoices, style=wx.CB_READONLY)
        self.CapFirstLetterCheck = wx.CheckBox(self.panel, -1, "Capitalize First Letter", pos=(215, 45))
        self.AllCapsCheck = wx.CheckBox(self.panel, -1, "Make All Caps", pos=(215, 65))
        self.DropShadowCheck = wx.CheckBox(self.panel, -1, "Drop Shadow", pos=(215, 85))
        self.topOffset = 90
        # Numeric attribute fields; each is a ToonSignTextCtrl that writes
        # back through WritePandaValue.
        self.kernStatic = wx.StaticText(self.panel, -1, "Kern", pos=(15, 15 + self.topOffset))
        self.kernValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 15 + self.topOffset))
        self.wiggleStatic = wx.StaticText(self.panel, -1, "Wiggle", pos=(15, 45 + self.topOffset))
        self.wiggleValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 45 + self.topOffset))
        self.stumbleStatic = wx.StaticText(self.panel, -1, "Stumble", pos=(15, 75 + self.topOffset))
        self.stumbleValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 75 + self.topOffset))
        # Fixed label typo: was "Stopm".
        self.stompStatic = wx.StaticText(self.panel, -1, "Stomp", pos=(15, 105 + self.topOffset))
        self.stompValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 105 + self.topOffset))
        self.curveStatic = wx.StaticText(self.panel, -1, "Curve", pos=(15, 135 + self.topOffset))
        self.curveValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 135 + self.topOffset))
        self.xStatic = wx.StaticText(self.panel, -1, "X", pos=(15, 165 + self.topOffset))
        self.xValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 165 + self.topOffset))
        self.zStatic = wx.StaticText(self.panel, -1, "Z", pos=(15, 195 + self.topOffset))
        self.zValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 195 + self.topOffset))
        self.xScaleStatic = wx.StaticText(self.panel, -1, "Scale X", pos=(15, 225 + self.topOffset))
        self.xScaleValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 225 + self.topOffset))
        self.zScaleStatic = wx.StaticText(self.panel, -1, "Scale Z", pos=(15, 255 + self.topOffset))
        self.zScaleValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 255 + self.topOffset))
        self.rollStatic = wx.StaticText(self.panel, -1, "Roll", pos=(15, 285 + self.topOffset))
        self.rollValue = ToonSignTextCtrl(self, -1, "0", pos=(60, 285 + self.topOffset))
        self.revertAllButton = wx.Button(self.panel, -1, "Revert All", pos=(60, 315 + self.topOffset), size=(100, 20))
        #self.tmpValue = FloatSpin(self.panel, -1, pos=(15, 315 + self.topOffset))
        #self.tmpSpinButton = wx.SpinButton(self.panel, -1, pos=(120, 315 + self.topOffset), size=(20, 20), style=wx.SP_VERTICAL)
        # Graphics-based baselines have no editable text/font attributes.
        if self.hasGraphics:
            self.signTxt.Enable(False)
            self.fontChoice.Enable(False)
            self.CapFirstLetterCheck.Enable(False)
            self.AllCapsCheck.Enable(False)
            self.DropShadowCheck.Enable(False)
            self.kernValue.Enable(False)
            self.wiggleValue.Enable(False)
            self.stumbleValue.Enable(False)
            self.stompValue.Enable(False)
            self.curveValue.Enable(False)
        # Shared knob: drives whichever numeric field currently has focus.
        self.knobCtrl = ToonKnobCtrl(self.panel, scale=self.scale, pos=(200, 80 + self.topOffset), size=(150, 150))
        self.knobCtrl.SetKnobRadius(6.0)
        self.knobCtrl.SetAngularRange(0.0, 360.0)
        self.ReadPandaValues(self.baselineDNA)
        self.Bind(KC_EVENT_ANGLE_CHANGED, self.OnKnobAngleChanged, self.knobCtrl)
        self.Bind(wx.EVT_TEXT, self.OnSignText, self.signTxt)
        self.Bind(wx.EVT_COMBOBOX, self.OnFontChoice, self.fontChoice)
        self.Bind(wx.EVT_BUTTON, self.OnRevertAll, self.revertAllButton)
        self.Bind(wx.EVT_CHECKBOX, self.OnCapFirstLetterCheck, self.CapFirstLetterCheck)
        self.Bind(wx.EVT_CHECKBOX, self.OnAllCapsCheck, self.AllCapsCheck)
        self.Bind(wx.EVT_CHECKBOX, self.OnDropShadowCheck, self.DropShadowCheck)
        # self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
        # self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
        self.Bind(wx.EVT_SHOW, self.OnShowWindow)
        # self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
    def OnKnobAngleChanged(self, event):
        """Route knob rotation into whichever numeric field has focus."""
        val = event.GetValue()
        window = self.FindFocus()
        if isinstance(window, ToonSignTextCtrl):
            self.WritePandaValue(window, val)
            window.SetValue(str(val))
    def ReadPandaValues(self, baseline):
        """Populate all widgets from the given baseline DNA."""
        self.signTxt.SetValue(self.txtOrig)
        self.fontChoice.SetValue(baseline.getCode())
        self.kernValue.SetValue(str(round(baseline.getKern(), 2)))
        self.wiggleValue.SetValue(str(round(baseline.getWiggle(), 2)))
        self.stumbleValue.SetValue(str(round(baseline.getStumble(), 2)))
        self.stompValue.SetValue(str(round(baseline.getStomp(), 2)))
        # Curvature is stored as a width (radius); 0 width means straight.
        width = baseline.getWidth()
        if width:
            curve = 1.0/width
        else:
            curve = 0.0
        self.curveValue.SetValue(str(round(curve, 2)))
        pos = baseline.getPos()
        self.xValue.SetValue(str(round(pos[0], 2)))
        self.zValue.SetValue(str(round(pos[2], 2)))
        scale = baseline.getScale()
        self.xScaleValue.SetValue(str(round(scale[0], 2)))
        self.zScaleValue.SetValue(str(round(scale[2], 2)))
        hpr = baseline.getHpr()
        self.rollValue.SetValue(str(round(hpr[2], 2)))
        # Flag characters: b=cap first letter, c=all caps, d=drop shadow.
        flags = baseline.getFlags()
        if 'b' in flags:
            self.CapFirstLetterCheck.SetValue(True)
        else:
            self.CapFirstLetterCheck.SetValue(False)
        if 'c' in flags:
            self.AllCapsCheck.SetValue(True)
        else:
            self.AllCapsCheck.SetValue(False)
        if 'd' in flags:
            self.DropShadowCheck.SetValue(True)
        else:
            self.DropShadowCheck.SetValue(False)
        color = baseline.getColor()
        #TODO: implement color picker and set color
    def WritePandaValue(self, window, val):
        """Dispatch a numeric value to the setter matching the field widget."""
        if window == self.kernValue:
            self.SetSignBaselineKern(val)
        elif window == self.wiggleValue:
            self.SetSignBaselineWiggle(val)
        elif window == self.stumbleValue:
            self.SetSignBaselineStumble(val)
        elif window == self.stompValue:
            self.SetSignBaselineStomp(val)
        elif window == self.curveValue:
            self.SetSignBaselineCurve(val)
        elif window == self.xValue:
            self.SetSignBaselineX(val)
        elif window == self.zValue:
            self.SetSignBaselineZ(val)
        elif window == self.xScaleValue:
            self.SetSignBaselineScaleX(val)
        elif window == self.zScaleValue:
            self.SetSignBaselineScaleZ(val)
        elif window == self.rollValue:
            self.SetSignBaselineRoll(val)
    def SetSignBaselineText(self, val):
        """Update the baseline caption (text baselines only)."""
        if self.baselineDNA:
            if self.hasGraphics == False:
                DNASetBaselineString(self.baselineDNA, val)
            self.objNP.replace()
    def SetSignBaselineFont(self, val):
        if self.baselineDNA:
            self.baselineDNA.setCode(val)
            self.objNP.replace()
    def SetSignBaselineKern(self, val):
        if self.baselineDNA:
            self.baselineDNA.setKern(val)
            self.objNP.replace()
    def SetSignBaselineWiggle(self, val):
        if self.baselineDNA:
            self.baselineDNA.setWiggle(val)
            self.objNP.replace()
    def SetSignBaselineStumble(self, val):
        if self.baselineDNA:
            self.baselineDNA.setStumble(val)
            self.objNP.replace()
    def SetSignBaselineStomp(self, val):
        if self.baselineDNA:
            self.baselineDNA.setStomp(val)
            self.objNP.replace()
    def SetSignBaselineCurve(self, val):
        """Store curvature as the reciprocal width/height; 0 means straight."""
        if self.baselineDNA:
            try:
                val=1.0/val
            except ZeroDivisionError:
                val=0.0
            self.baselineDNA.setWidth(val)
            self.baselineDNA.setHeight(val)
            self.objNP.replace()
    def SetSignBaselineX(self, val):
        if self.baselineDNA:
            pos=self.baselineDNA.getPos()
            pos=VBase3(val, pos[1], pos[2])
            self.baselineDNA.setPos(pos)
            self.objNP.replace()
    def SetSignBaselineZ(self, val):
        if self.baselineDNA:
            pos=self.baselineDNA.getPos()
            pos=VBase3(pos[0], pos[1], val)
            self.baselineDNA.setPos(pos)
            self.objNP.replace()
    def SetSignBaselineScaleX(self, val):
        if self.baselineDNA:
            scale=self.baselineDNA.getScale()
            scale=VBase3(val, scale[1], scale[2])
            self.baselineDNA.setScale(scale)
            self.objNP.replace()
    def SetSignBaselineScaleZ(self, val):
        if self.baselineDNA:
            scale=self.baselineDNA.getScale()
            scale=VBase3(scale[0], scale[1], val)
            self.baselineDNA.setScale(scale)
            self.objNP.replace()
    def SetSignBaselineRoll(self, val):
        if self.baselineDNA:
            hpr=self.baselineDNA.getHpr()
            hpr=VBase3(hpr[0], hpr[1], val)
            self.baselineDNA.setHpr(hpr)
            self.objNP.replace()
    def SetSignBaselineColor(self, var):
        if self.baselineDNA:
            self.baselineDNA.setColor(var)
            self.objNP.replace()
    def SetSignBaselineFlag(self, flagChar):
        """Toggle a single style flag character on the baseline."""
        if self.baselineDNA:
            flags = self.baselineDNA.getFlags()
            if not flagChar in flags:
                # Add the flag:
                self.baselineDNA.setFlags(flags+flagChar)
            elif flagChar in flags:
                # Remove the flag:
                flags=string.join(flags.split(flagChar), '')
                self.baselineDNA.setFlags(flags)
            self.objNP.replace()
    def OnSignText(self, event):
        self.SetSignBaselineText(self.signTxt.GetValue())
    def OnFontChoice(self, event):
        self.SetSignBaselineFont(self.fontChoice.GetValue())
    def OnCapFirstLetterCheck(self, event):
        self.SetSignBaselineFlag('b')
    def OnAllCapsCheck(self, event):
        self.SetSignBaselineFlag('c')
    def OnDropShadowCheck(self, event):
        self.SetSignBaselineFlag('d')
    def OnRevertAll(self, event):
        """Restore the style captured at construction and refresh the UI."""
        self.baselineStyleOrig.copyTo(self.baselineDNA)
        self.ReadPandaValues(self.baselineDNA)
    def OnSetFocus(self, event):
        # Suspend editor hotkeys while this frame owns keyboard input.
        self.editor.ui.bindKeyEvents(False)
    def OnShowWindow(self, event):
        self.editor.ui.bindKeyEvents(False)
    def OnCloseWindow(self, event):
        self.editor.ui.bindKeyEvents(True)
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
5b7dfee3f6b4c14d728cbcd104dab4bee21ee794 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2761/60747/246217.py | 9d03172af498a3a1277332b3a310877d13fc4478 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | n=int(input())
result=[]
for i in range(n):
sum=0
num=int(input())
for i in range(num+1):
sum=sum+i*i
result.append(sum)
for f in range(n):
print(result[f]) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
510f696ac8f8d51de2808c5a04deb4bf2d448ff3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_54/457.py | eb264093bd6ce407dfba0a75b15923f580c80baa | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | from sys import stdin
def gcd(A, B):
a = A
b = B
while b != 0:
r = a % b
a = b
b = r
return a
if __name__ == '__main__':
    # Python 2 solution. For each case: given N event timestamps on a
    # cycle, find the smallest non-negative wait y such that (first + y)
    # is a multiple of the GCD of all pairwise (cyclic) differences.
    C = int(stdin.readline())
    for c in xrange(1, C + 1):
        a = stdin.readline().split()
        N = int(a[0])
        t = map(long, a[1:])
        first = t[0]
        # Replace t with the ring of absolute differences between
        # consecutive entries (last wraps around to the first).
        for i in xrange(N - 1):
            t[i] = abs(t[i] - t[i + 1])
        t[-1] = abs(t[-1] - first)
        # T = gcd of all the differences.
        T = t[-1]
        for i in xrange(N - 1):
            T = gcd(T, t[i])
        # Smallest y >= 0 with (first + y) divisible by T.
        if first % T == 0:
            y = 0
        else:
            y = T - first % T
        print "Case #%d: %d" % (c, y)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f677802888481b96a2c6b0d4537ffe39daea4b66 | a14ec6e367e6a471bfc74c066fb958ef585bc269 | /2019/08/common.py | cb39afff51d0c807e5697dc5794d241a5969ab81 | [] | no_license | jimhendy/AoC | 90641814ed431f46a8500ff0f022c6c957567563 | a1727f88bc2e6f739d65902dce188377966b3fb4 | refs/heads/master | 2023-09-02T14:48:39.860352 | 2023-08-28T08:09:19 | 2023-08-28T08:09:19 | 225,152,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import numpy as np
def in_to_array(inputs, n_rows=6, n_cols=25):
data = np.array(list(inputs))
return data.reshape(-1, n_rows, n_cols).astype(int)
| [
"jimhendy88@gmail.com"
] | jimhendy88@gmail.com |
8e022b65157dfae25c3997ca0bfcf8bfd5b3af03 | 57ea6657b4deb620c4e29b606a5ec259d22fadcd | /Chatbot_Web/impl/view/kg_overview.py | 44d5dfc7bee3607865aecc31e6b77c9d710eaab7 | [
"Apache-2.0"
] | permissive | orchestor/Chatbot_CN | 021d05849257d66e8e2a65d4ead5a777e09d7d3d | 43922d7f73946d00faad3f27d86188ec18022965 | refs/heads/master | 2020-05-09T12:48:48.124981 | 2019-04-09T13:54:24 | 2019-04-09T13:54:24 | 181,124,145 | 1 | 0 | Apache-2.0 | 2019-04-13T05:11:09 | 2019-04-13T05:11:06 | null | UTF-8 | Python | false | false | 3,273 | py | #-*- coding:utf-8 _*-
"""
@author:charlesXu
@file: kg_overview.py
@desc: 知识图谱概览页面
@time: 2019/01/28
"""
import sys
from django.shortcuts import render
from pinyin import pinyin
from Chatbot_KG.toolkit.pre_load import tree
def show_overview(request):
    """Render the knowledge-graph category overview page.

    Reads an optional ``node`` GET parameter naming the category to show and
    builds raw HTML fragments for the template context:
    * ``leaf``   -- leaf entries grouped by the pinyin initial (A-Z);
    * ``father`` -- links to the parent categories;
    * ``branch`` -- links to the non-leaf child categories;
    * ``level_tree`` -- category tree markup from ``tree.create_UI``.
    """
    ctx = {}
    if 'node' in request.GET:
        node = request.GET['node']
        fatherList = tree.get_father(node)
        branchList = tree.get_branch(node)
        leafList = tree.get_leaf(node)
        ctx['node'] = "分类专题:[" + node + "]"
        rownum = 4  # number of entries per row in the leaf grid
        leaf = ""
        # One bucket per capital letter; each leaf goes into the bucket of
        # the first latin letter of its pinyin transcription.
        alpha_table = {}
        for alpha in range(ord('A'), ord('Z') + 1):
            alpha_table[chr(alpha)] = []
        for p in leafList:
            py = pinyin.get_initial(p)
            alpha = ord('A')  # fallback bucket when no latin letter is found
            for s in py:
                t = ord(s)
                if t >= ord('a') and t <= ord('z'):
                    t = t + ord('A') - ord('a')
                if t >= ord('A') and t <= ord('Z'):
                    alpha = t
                    break
            alpha_table[chr(alpha)].append(p)
        for kk in range(ord('A'), ord('Z') + 1):
            k = chr(kk)
            v = alpha_table[k]
            if len(v) == 0:
                continue
            add_num = rownum - len(v) % rownum  # number of padding cells
            add_num %= rownum
            for i in range(add_num):  # pad with empty slots to fill the last row
                v.append('')
            leaf += '<div><span class="label label-warning"> ' + k + ' </span></div><br/>'
            for i in range(len(v)):
                if i % rownum == 0:
                    leaf += "<div class='row'>"
                leaf += '<div class="col-md-3">'
                leaf += '<p><a href="detail?title=' + v[i] + '">'
                # Truncate long titles so the grid cells stay aligned.
                if len(v[i]) > 10:
                    leaf += v[i][:10] + '...'
                else:
                    leaf += v[i]
                leaf += '</a></p>'
                leaf += '</div>'
                if i % rownum == rownum - 1:
                    leaf += "</div>"
            leaf += '<br/>'
        ctx['leaf'] = leaf
        # List of parent nodes (link list, or a "top level" notice).
        father = '<ul class="nav nav-pills nav-stacked">'
        for p in fatherList:
            father += '<li role="presentation"> <a href="overview?node='
            father += p + '">'
            father += '<i class="fa fa-hand-o-right" aria-hidden="true"></i> ' + p + '</a></li>'
        father += '</ul>'
        if len(fatherList) == 0:
            father = '<p>已是最高级分类</p>'
        ctx['father'] = father
        # List of non-leaf (branch) child nodes.
        branch = '<ul class="nav nav-pills nav-stacked">'
        for p in branchList:
            branch += '<li role="presentation"> <a href="overview?node='
            branch += p + '">'
            branch += '<i class="fa fa-hand-o-right" aria-hidden="true"></i> ' + p + '</a></li>'
        branch += '</ul>'
        if len(branchList) == 0:
            branch = '<p>已是最低级分类</p>'
        ctx['branch'] = branch
        # Build the category tree widget markup.
        level_tree = tree.create_UI(node)
        ctx['level_tree'] = level_tree
    return render(request, "knowledge_graph/kg_overview.html", ctx)
| [
"charlesxu86@163.com"
] | charlesxu86@163.com |
1f799e8e7be3bb458b963d6e3341620559bc09f8 | 0680311baa2a401f93bf4124fb6bbc229950848c | /model/one_stage_detector.py | a62d198fd9f49d880deaa56845b6398bc0a618c0 | [] | no_license | TonojiKiobya/m2det_pytorch | 8b56342862ef5dbc74dd905957cb41ab30273aff | 20a00c4ece288148e6112daa822451c6904560c6 | refs/heads/master | 2023-06-08T14:41:29.110670 | 2019-03-26T10:40:46 | 2019-03-26T10:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,483 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 16:05:16 2019
@author: ubuntu
"""
import logging
import torch.nn as nn
import numpy as np
import pycocotools.mask as maskUtils
import mmcv
from dataset.utils import tensor2imgs
from dataset.class_names import get_classes
from utils.registry_build import registered, build_module
@registered.register_module
class OneStageDetector(nn.Module):
"""one stage单级检测器: 整合了base/singlestagedetector在一起
"""
def __init__(self, cfg):
super(OneStageDetector, self).__init__()
# self.backbone = SSDVGG(**cfg.model.backbone)
# self.bbox_head = SSDHead(**cfg.model.bbox_head)
self.cfg = cfg
self.backbone = build_module(cfg.model.backbone, registered)
self.bbox_head = build_module(cfg.model.bbox_head, registered)
if cfg.model.neck is not None:
self.neck = build_module(cfg.model.neck, registered)
self.train_cfg = cfg.train_cfg
self.test_cfg = cfg.test_cfg
self.init_weights(pretrained=cfg.model.pretrained)
def init_weights(self, pretrained=None):
if pretrained is not None:
logger = logging.getLogger()
logger.info('load model from: {}'.format(pretrained))
self.backbone.init_weights(pretrained=pretrained)
self.bbox_head.init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.cfg.model.neck is not None:
x = self.neck(x)
return x
def forward_train(self, img, img_metas, gt_bboxes, gt_labels):
x = self.extract_feat(img)
outs = self.bbox_head(x)
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(*loss_inputs)
return losses
def forward_test(self, imgs, img_metas, **kwargs):
"""用于测试时的前向计算:如果是单张图则跳转到simple_test(),
如果是多张图则跳转到aug_test(),但ssd当前不支持多图测试(aug_test未实施)
即在验证时每个gpu只能放1张图片
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError('{} must be a list, but got {}'.format(
name, type(var)))
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(
'num of augmentations ({}) != num of image meta ({})'.format(
len(imgs), len(img_metas)))
# TODO: remove the restriction of imgs_per_gpu == 1 when prepared
imgs_per_gpu = imgs[0].size(0)
assert imgs_per_gpu == 1
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], **kwargs)
else:
return self.aug_test(imgs, img_metas, **kwargs)
def forward(self, img, img_meta, return_loss=True, **kwargs):
if return_loss:
return self.forward_train(img, img_meta, **kwargs)
else:
return self.forward_test(img, img_meta, **kwargs)
def simple_test(self, img, img_meta, rescale=False):
"""用于测试时单图前向计算:
输出
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
bbox_results = [
self.bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results[0]
def aug_test(self, imgs, img_metas, rescale=False):
"""用于测试时多图前向计算: 当前ssd不支持多图测试"""
raise NotImplementedError
def show_result(self, data, result, img_norm_cfg,
dataset='coco',
score_thr=0.3):
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
img_tensor = data['img'][0]
img_metas = data['img_meta'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_norm_cfg)
assert len(imgs) == len(img_metas)
if isinstance(dataset, str):
class_names = get_classes(dataset)
elif isinstance(dataset, (list, tuple)) or dataset is None:
class_names = dataset
else:
raise TypeError(
'dataset must be a valid dataset name or a sequence'
' of class names, not {}'.format(type(dataset)))
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
mmcv.imshow_det_bboxes(
img_show,
bboxes,
labels,
class_names=class_names,
score_thr=score_thr)
def bbox2result(self, bboxes, labels, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor): shape (n, 5)
labels (Tensor): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): bbox results of each class
"""
if bboxes.shape[0] == 0:
return [
np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1)
]
else:
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()
return [bboxes[labels == i, :] for i in range(num_classes - 1)] | [
"ximitiejiang@163.com"
] | ximitiejiang@163.com |
8f358c1bc133197673c67695d220540b3e6a5394 | c8371b410f19dc87059bbe0a28e983c3cfe0f4f8 | /src/etheroll/roll.py | 1e52d4a97491b5fb857622c89d755a84f9517b56 | [
"MIT"
] | permissive | homdx/EtherollApp | c70e37cff4fbbde8c605a8ca87776535185a7167 | 4953ce0f10ac58d43517fbc3a18bc5ed43297858 | refs/heads/master | 2020-03-28T19:05:10.591229 | 2018-09-30T21:25:32 | 2018-09-30T21:25:32 | 148,942,827 | 0 | 0 | MIT | 2018-09-15T21:52:51 | 2018-09-15T21:52:51 | null | UTF-8 | Python | false | false | 5,457 | py | from etherscan.client import ConnectionRefused
from kivy.app import App
from kivy.clock import Clock, mainthread
from kivy.properties import NumericProperty, StringProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.screenmanager import Screen
from etheroll.utils import Dialog, load_kv_from_py, run_in_thread
from pyetheroll.constants import ROUND_DIGITS
load_kv_from_py(__file__)
class RollUnderRecap(GridLayout):
    """Recap widget exposing the roll-under number, potential profit and
    wager as Kivy properties (bound from the kv file — see `load_kv_from_py`
    at module level)."""
    roll_under_property = NumericProperty()
    profit_property = NumericProperty()
    wager_property = NumericProperty()
class BetSize(BoxLayout):
    """Widget pairing the bet-size slider with its text input; the two
    controls are kept in sync both ways via `bind_slider_input()`."""

    def __init__(self, **kwargs):
        super(BetSize, self).__init__(**kwargs)
        # Defer the wiring one frame so the widgets referenced through
        # `self.ids` (declared in the kv file) are available.
        Clock.schedule_once(self._after_init)

    def _after_init(self, dt):
        """
        Binds events.
        """
        slider = self.ids.bet_size_slider_id
        inpt = self.ids.bet_size_input_id
        cast_to = float
        # shows less digits than the constant default to keep the input tiny
        round_digits = 1
        BetSize.bind_slider_input(slider, inpt, cast_to, round_digits)

    @staticmethod
    def bind_slider_input(
            slider, inpt, cast_to=float, round_digits=ROUND_DIGITS):
        """
        Binds slider <-> input both ways.

        `cast_to` converts the input text before it is written back to the
        slider; `round_digits` controls how many decimals are displayed.
        """
        # slider -> input
        slider.bind(
            value=lambda instance, value:
            setattr(inpt, 'text', "{0:.{1}f}".format(
                cast_to(value), round_digits)))
        # input -> slider
        inpt.bind(
            on_text_validate=lambda instance:
            setattr(slider, 'value', cast_to(inpt.text)))
        # also when unfocused
        inpt.bind(
            focus=lambda instance, focused:
            inpt.dispatch('on_text_validate')
            if not focused else False)
        # synchronises values slider <-> input once
        inpt.dispatch('on_text_validate')

    @property
    def value(self):
        """
        Returns normalized bet size value.

        Falls back to 0 when the input text is not a valid number.
        """
        try:
            return round(
                float(self.ids.bet_size_input_id.text), ROUND_DIGITS)
        except ValueError:
            return 0
class ChanceOfWinning(BoxLayout):
    """Widget pairing the win-chance slider with its text input."""

    def __init__(self, **kwargs):
        super(ChanceOfWinning, self).__init__(**kwargs)
        # Defer the wiring until the kv `ids` are available.
        Clock.schedule_once(self._after_init)

    def _after_init(self, dt):
        """Mirrors the chances slider and text input both ways."""
        BetSize.bind_slider_input(
            self.ids.chances_slider_id,
            self.ids.chances_input_id,
            self.cast_to,
            0)

    @staticmethod
    def cast_to(value):
        """Truncates any numeric string/float down to a plain int."""
        return int(float(value))

    @property
    def value(self):
        """Normalized chances value; 0 when the input is not a number."""
        raw = self.ids.chances_input_id.text
        try:
            # `input_filter: 'int'` only verifies we have a number, it does
            # not convert it, so go through float first.
            return int(float(raw))
        except ValueError:
            return 0
class RollScreen(Screen):
    """Main roll screen: shows the selected account, its balance and the
    roll parameters (bet size / chance of winning)."""

    # Kivy properties, kept in sync with the app controller.
    current_account_string = StringProperty()
    balance_property = NumericProperty()

    def __init__(self, **kwargs):
        super(RollScreen, self).__init__(**kwargs)
        Clock.schedule_once(self._after_init)

    def _after_init(self, dt):
        """
        Binds `Controller.current_account` -> `RollScreen.current_account`.
        """
        controller = App.get_running_app().root
        controller.bind(current_account=self.on_current_account)

    def on_current_account(self, instance, account):
        """
        Sets current_account_string from the account's hex address.
        """
        if account is None:
            return
        self.current_account_string = '0x' + account.address.hex()

    def get_roll_input(self):
        """
        Returns bet size and chance of winning user input values.
        """
        bet_size = self.ids.bet_size_id
        chance_of_winning = self.ids.chance_of_winning_id
        return {
            "bet_size": bet_size.value,
            "chances": chance_of_winning.value,
        }

    @mainthread
    def toggle_widgets(self, enabled):
        """
        Enables/disables widgets (useful during roll).
        """
        self.disabled = not enabled

    @property
    def pyetheroll(self):
        """
        We want to make sure we go through the `Controller.pyetheroll` property
        each time, because it recreates the Etheroll object on chain_id
        changes.
        """
        controller = App.get_running_app().root
        return controller.pyetheroll

    @mainthread
    def update_balance(self, balance):
        """
        Updates the property from main thread.
        """
        self.balance_property = balance

    @staticmethod
    @mainthread
    def on_connection_refused():
        """Pops a dialog telling the user the balance could not be fetched."""
        title = 'No network'
        body = 'No network, could not retrieve account balance.'
        dialog = Dialog.create_dialog(title, body)
        dialog.open()

    @run_in_thread
    def fetch_update_balance(self):
        """
        Retrieves the balance and updates the property.

        Runs in a background thread; all UI updates go through the
        `@mainthread`-decorated helpers above.
        """
        address = self.current_account_string
        if not address:
            return
        try:
            balance = self.pyetheroll.get_balance(address)
        except ConnectionRefused:
            self.on_connection_refused()
            return
        self.update_balance(balance)
| [
"andre.miras@gmail.com"
] | andre.miras@gmail.com |
a07012e4a01d74426aafbfa004b80c190341161a | 2d05050d0ada29f7680b4df20c10bb85b0530e45 | /python/tvm/exec/gpu_memory_bandwidth.py | a5f2021f733c2cd1e3ba8e6c82cce9db1dc4c994 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] | permissive | apache/tvm | 87cb617f9a131fa44e1693303aaddf70e7a4c403 | d75083cd97ede706338ab413dbc964009456d01b | refs/heads/main | 2023-09-04T11:24:26.263032 | 2023-09-04T07:26:00 | 2023-09-04T07:26:00 | 70,746,484 | 4,575 | 1,903 | Apache-2.0 | 2023-09-14T19:06:33 | 2016-10-12T22:20:28 | Python | UTF-8 | Python | false | false | 5,788 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A script to measure GPU memory bandwidth"""
import argparse
import itertools
import numpy as np
import tvm
from tvm import te, tir
from tvm.meta_schedule.runner import EvaluatorConfig
from tvm.testing import local_run
def _parse_args() -> argparse.Namespace:
def _parse_list_int(source: str):
return [int(i) for i in source.split(",")]
parser = argparse.ArgumentParser(
prog="GPU memory bandwidth testing",
description="""Example:
python -m tvm.exec.gpu_memory_bandwidth "nvidia/geforce-rtx-3090-ti" \
--dtype "float32"
--bx "8,16,32,64,128,256" \
--tx "32,64,128,256,512,1024" \
--vec "1,2,4"
""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"target",
type=str,
help="The target to be benchmarked",
)
parser.add_argument(
"--xo",
type=int,
default=1024,
help="The value of `XO` in [XO, K, XI] => [XO, XI] reduction",
)
parser.add_argument(
"--k",
type=int,
default=64,
help="The value of `K` in [XO, K, XI] => [XO, XI] reduction",
)
parser.add_argument(
"--xi",
type=int,
default=4096,
help="The value of `XI` in [XO, K, XI] -> [XO, XI] reduction",
)
parser.add_argument(
"--dtype",
type=str,
default="float32",
help="The data type to be used in the workload",
)
parser.add_argument(
"--bx",
type=_parse_list_int,
default=[8, 16, 32, 64, 128, 256],
help="The value to be used to split `XO` into [BX, _]",
)
parser.add_argument(
"--tx",
type=_parse_list_int,
default=[32, 64, 128, 256, 512, 1024],
help="Number of threads to be used",
)
parser.add_argument(
"--vec",
type=_parse_list_int,
default=[1, 2, 4],
help="Vector length to be used in vectorized load",
)
return parser.parse_args()
def _workload(
    len_xo: int,
    len_k: int,
    len_xi: int,
    dtype: str,
):
    """Build the [XO, K, XI] => [XO, XI] sum-reduction as a TE prim func."""
    # pylint: disable=invalid-name
    A = te.placeholder((len_xo, len_k, len_xi), dtype=dtype, name="A")
    k = te.reduce_axis((0, len_k), "k")

    def _reduce_over_k(i, j):
        # B[i, j] = sum over k of A[i, k, j]
        return te.sum(A[i, k, j], axis=k)

    B = te.compute((len_xo, len_xi), _reduce_over_k, name="B")
    # pylint: enable=invalid-name
    return te.create_prim_func([A, B])
def _schedule(
    sch: tir.Schedule,
    len_bx: int,
    len_tx: int,
    len_vec: int,
):
    """Apply the GPU schedule: split the spatial loops into `len_bx` grid
    pieces and `len_tx` threads per block, with `len_vec`-wide vectorized
    loads through a "local"-scope cache stage. Mutates `sch` in place.
    """
    # pylint: disable=invalid-name
    block = sch.get_block("B")
    xo, xi, k = sch.get_loops(block)
    # Split the outer spatial loop for the grid and the inner one into
    # (serial, threads, vector) segments.
    bx, xo = sch.split(xo, factors=[len_bx, None])
    xi, tx, vec = sch.split(xi, factors=[None, len_tx, len_vec])
    sch.reorder(bx, xi, tx, xo, k, vec)
    bx = sch.fuse(bx, xi)
    sch.bind(bx, "blockIdx.x")
    sch.bind(tx, "threadIdx.x")
    # Stage the input through a "local"-scope cache and vectorize its load.
    ldg = sch.cache_read(block, 0, "local")
    sch.compute_at(ldg, k, preserve_unit_loops=True)
    sch.vectorize(sch.get_loops(ldg)[-1])
    # Split the reduction init out of the update loop.
    sch.decompose_reduction(block, k)
    # pylint: enable=invalid-name
def main():  # pylint: disable=too-many-locals
    """Entry point: sweep (bx, tx, vec) schedule candidates over the
    reduction workload and report the best achieved memory bandwidth."""
    args = _parse_args()
    # pylint: disable=invalid-name
    target = tvm.target.Target(args.target)
    dtype = args.dtype
    # Random input and zeroed output; their combined size is the byte
    # traffic used for the bandwidth figure below.
    a = np.random.uniform(-1, 1, (args.xo, args.k, args.xi)).astype(dtype)
    b = np.zeros((args.xo, args.xi), dtype=dtype)
    num_bytes = a.size * a.itemsize + b.size * b.itemsize
    print("###### Bandwidth Test ######")
    print(
        f"Workload [XO, K, XI] => [XO, XI]. "
        f"[{args.xo}, {args.k}, {args.xi}] => [{args.xo}, {args.xi}]"
    )
    print(f"Input size: {num_bytes / 1048576} MB")
    print(f"Target: {target}")
    # pylint: enable=invalid-name
    best_bandwidth = -1
    # Exhaustive sweep over the cartesian product of schedule parameters.
    for len_bx, len_tx, len_vec in itertools.product(
        args.bx,
        args.tx,
        args.vec,
    ):
        func = _workload(
            len_xo=args.xo,
            len_k=args.k,
            len_xi=args.xi,
            dtype=dtype,
        )
        sch = tir.Schedule(func)
        _schedule(sch, len_bx, len_tx, len_vec)
        _, profile_result = local_run(
            tvm.build(sch.mod, target=target),
            target.kind.name,
            [a, b],
            evaluator_config=EvaluatorConfig(
                number=10,
                repeat=1,
                min_repeat_ms=100,
                enable_cpu_cache_flush=False,
            ),
        )
        # bandwidth = bytes moved / mean runtime, reported in GiB/s.
        bandwidth = num_bytes / profile_result.mean / (1024**3)
        bx = len_bx * args.xi // (len_tx * len_vec)  # pylint: disable=invalid-name
        mbs = num_bytes / 1024 / 1024
        print(
            f"bandwidth = {bandwidth:.3f} GB/s, bx = {bx}, tx = {len_tx}, "
            f"len_vec = {len_vec}, bytes = {mbs} MB"
        )
        if bandwidth > best_bandwidth:
            best_bandwidth = bandwidth
    print(f"peak bandwidth: {best_bandwidth:.3f} GB/s")


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | apache.noreply@github.com |
0d9346ea6bfd98a4ea4aa39fdecc95754315d40f | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/anderspeders/sa_tester.py | 72d36cd4c6c8bfde2f227c119e5d3741ea5aa7be | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | import scraperwiki
scraperwiki.sqlite.save_var('data_columns', ['id', 'name', 'adress', 'city', 'postcode', 'country', 'Virksomhedstype'])
ftUrl = 'http://ec.europa.eu/competition/elojade/isef/index.cfm'
import lxml.html
root = lxml.html.fromstring(ftUrl)
for tr in root.cssselect("div[align='left'] tr.tcont"):
tds = tr.cssselect("td")
data = {
'Navn' : tds[0].text_content(),
'Adresse' : int(tds[4].text_content())
}
print data
data=ftUrl
import scraperwiki
scraperwiki.sqlite.save_var('data_columns', ['id', 'name', 'adress', 'city', 'postcode', 'country', 'Virksomhedstype'])
ftUrl = 'http://ec.europa.eu/competition/elojade/isef/index.cfm'
import lxml.html
root = lxml.html.fromstring(ftUrl)
for tr in root.cssselect("div[align='left'] tr.tcont"):
tds = tr.cssselect("td")
data = {
'Navn' : tds[0].text_content(),
'Adresse' : int(tds[4].text_content())
}
print data
data=ftUrl
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
4b9458729e6b85451bdb9d0880d6f1ea08765ae7 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/CISCO-TRUSTSEC-CAPABILITY.py | 4df6656890b58be3f9bc3ebc42b576b04390624c | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 5,949 | py | #
# PySNMP MIB module CISCO-TRUSTSEC-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-TRUSTSEC-CAPABILITY
# Produced by pysmi-0.3.4 at Wed May 1 12:14:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
CtsPasswordEncryptionType, = mibBuilder.importSymbols("CISCO-TRUSTSEC-TC-MIB", "CtsPasswordEncryptionType")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, AgentCapabilities, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "AgentCapabilities", "ModuleCompliance")
IpAddress, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Integer32, TimeTicks, Gauge32, NotificationType, Counter32, ObjectIdentity, ModuleIdentity, Counter64, Bits, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Integer32", "TimeTicks", "Gauge32", "NotificationType", "Counter32", "ObjectIdentity", "ModuleIdentity", "Counter64", "Bits", "Unsigned32")
DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention")
ciscoTrustSecCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 598))
ciscoTrustSecCapability.setRevisions(('2012-09-07 00:00', '2011-09-28 00:00', '2010-11-02 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoTrustSecCapability.setRevisionsDescriptions(('Added capability statements - ciscoTrustSecCapV15R0101SYPCat6kSup2T - ciscoTrustSecCapV15R0101SYPCat6kSup720 Added VARITION for object ctsSgtAssignmentMethod to the following capability statements: - ciscoTrustSecCapV12R0250SYPCat6k - ciscoTrustSecCapV15R0001SYPCat6k', 'Added capability statement ciscoTrustSecCapV15R0001SYPCat6k.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoTrustSecCapability.setLastUpdated('201209070000Z')
if mibBuilder.loadTexts: ciscoTrustSecCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoTrustSecCapability.setContactInfo('Cisco Systems Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-san@cisco.com, cs-lan-switch-snmp@cisco.com')
if mibBuilder.loadTexts: ciscoTrustSecCapability.setDescription('The capabilities description of CISCO-TRUSTSEC-MIB.')
ciscoTrustSecCapV12R0250SYPCat6k = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 598, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoTrustSecCapV12R0250SYPCat6k = ciscoTrustSecCapV12R0250SYPCat6k.setProductRelease('Cisco IOS 12.2(50)SY on Catalyst 6000/6500\n series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoTrustSecCapV12R0250SYPCat6k = ciscoTrustSecCapV12R0250SYPCat6k.setStatus('current')
if mibBuilder.loadTexts: ciscoTrustSecCapV12R0250SYPCat6k.setDescription('CISCO-TRUSTSEC-MIB capabilities.')
ciscoTrustSecCapV15R0001SYPCat6k = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 598, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoTrustSecCapV15R0001SYPCat6k = ciscoTrustSecCapV15R0001SYPCat6k.setProductRelease('Cisco IOS 15.0(1)SY on Catalyst 6000/6500\n series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoTrustSecCapV15R0001SYPCat6k = ciscoTrustSecCapV15R0001SYPCat6k.setStatus('current')
if mibBuilder.loadTexts: ciscoTrustSecCapV15R0001SYPCat6k.setDescription('CISCO-TRUSTSEC-MIB capabilities.')
ciscoTrustSecCapV15R0101SYPCat6kSup2T = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 598, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoTrustSecCapV15R0101SYPCat6kSup2T = ciscoTrustSecCapV15R0101SYPCat6kSup2T.setProductRelease('Cisco IOS 15.1(1)SY on Catalyst 6000/6500\n series devices with Supervisor 2T present.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoTrustSecCapV15R0101SYPCat6kSup2T = ciscoTrustSecCapV15R0101SYPCat6kSup2T.setStatus('current')
if mibBuilder.loadTexts: ciscoTrustSecCapV15R0101SYPCat6kSup2T.setDescription('CISCO-TRUSTSEC-MIB capabilities.')
ciscoTrustSecCapV15R0101SYPCat6kSup720 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 598, 4))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoTrustSecCapV15R0101SYPCat6kSup720 = ciscoTrustSecCapV15R0101SYPCat6kSup720.setProductRelease('Cisco IOS 15.1(1)SY on Catalyst 6000/6500\n series devices with Supervisor 720 present.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoTrustSecCapV15R0101SYPCat6kSup720 = ciscoTrustSecCapV15R0101SYPCat6kSup720.setStatus('current')
if mibBuilder.loadTexts: ciscoTrustSecCapV15R0101SYPCat6kSup720.setDescription('CISCO-TRUSTSEC-MIB capabilities.')
mibBuilder.exportSymbols("CISCO-TRUSTSEC-CAPABILITY", ciscoTrustSecCapV15R0001SYPCat6k=ciscoTrustSecCapV15R0001SYPCat6k, ciscoTrustSecCapV15R0101SYPCat6kSup720=ciscoTrustSecCapV15R0101SYPCat6kSup720, PYSNMP_MODULE_ID=ciscoTrustSecCapability, ciscoTrustSecCapV15R0101SYPCat6kSup2T=ciscoTrustSecCapV15R0101SYPCat6kSup2T, ciscoTrustSecCapability=ciscoTrustSecCapability, ciscoTrustSecCapV12R0250SYPCat6k=ciscoTrustSecCapV12R0250SYPCat6k)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
e5a36deb356ed45d8a49e46daa46887d4e9d4c1e | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/DesignPointSelectStrategy_20210714202552.py | 2ac589eab5e3a7e94d03b273ea1e766f243f7093 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,777 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPDP1P2 as ca_pd_12
from icecream import ic
"""
The unit use is IS standard
"""
class Design_Point_Select_Strategy:
    """Design point selection strategy based on constraints analysis.

    For each flight condition the total power-to-weight ratio is evaluated
    over a sweep of the hybridization ratio ``hp``; the most demanding
    condition at its best ``hp`` defines the design point.
    """

    def __init__(self, altitude, velocity, beta, method, p_w_turbofan_max=72, p_w_motorfun_max=10, n=12):
        """
        :param altitude: m x 1 matrix
        :param velocity: m x 1 matrix
        :param beta: P_motor/P_total m x 1 matrix
        :param method: 1 selects the Mattingly formulation, anything else
            the Gudmundsson formulation
        :param p_w_turbofan_max: maximum propulsion power for turbofan (threshold value)
        :param p_w_motorfun_max: maximum propulsion power for motorfun (threshold value)
        :param n: number of motor

        The first group of conditions (row 0) is the stall-speed constraint;
        it has to use the motor, hence it is pinned to ``hp_threshold``.
        :return:
            power load: design point p/w and w/s (via :meth:`strategy`)
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.n_motor = n
        self.p_w_turbofan_max = p_w_turbofan_max
        self.p_w_motorfun_max = p_w_motorfun_max

        # initialize the p_w, w_s, hp, n, m
        self.n = 100  # resolution of the hp sweep
        self.m = altitude.size  # number of flight conditions
        # NOTE(review): the grid runs up to 1 + 1/n (i.e. 1.01), not 1.0, so
        # the spacing is (1 + 1/n)/n -- confirm whether 0..1 was intended.
        self.hp = np.linspace(0, 1 + 1 / self.n, self.n + 1)
        self.hp_threshold = self.p_w_motorfun_max / (self.p_w_motorfun_max + self.p_w_turbofan_max)

        # method1 = Mattingly_Method, method2 = Gudmundsson_Method
        if method == 1:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric
        else:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric

        # Wing loading from the stall-speed constraint (condition 0).
        problem = self.method1(self.h[0], self.v[0], self.beta[0], 6000, self.hp_threshold)
        self.w_s = problem.allFuncs[0](problem)

    def p_w_compute(self):
        """Total p/w for every flight condition (rows) and hp value (cols).

        Candidates whose turbofan or motor share exceeds its threshold are
        replaced with a huge penalty so they never win the minimization.
        """
        p_w = np.zeros([self.m, len(self.hp)])  # m x (n+1) matrix
        # Condition 0 (stall speed) constrains w/s, not p/w, so start at 1.
        # The original hard-coded ``range(1, 8)``; ``self.m`` generalizes the
        # class to any number of flight conditions (identical for m == 8).
        for i in range(1, self.m):
            for j in range(len(self.hp)):
                problem1 = self.method1(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])
                problem2 = self.method2(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])

                if i >= 5:
                    # Conditions 5, 6, 7 are climb cases with decreasing
                    # rate-of-climb requirements: 15, 10, 5.
                    p_w_1 = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
                    p_w_2 = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
                else:
                    p_w_1 = problem1.allFuncs[i](problem1)
                    p_w_2 = problem2.allFuncs[i](problem2)

                # Penalize infeasible splits. Kept as an if/elif chain to
                # preserve the original penalty values exactly.
                if p_w_1 > self.p_w_turbofan_max:
                    p_w_1 = 100000
                elif p_w_2 > self.p_w_motorfun_max:
                    p_w_2 = 100000

                p_w[i, j] = p_w_1 + p_w_2
        return p_w

    def strategy(self):
        """Returns (hp at minimum p/w per condition, design point [w/s, p/w])."""
        p_w = self.p_w_compute()

        # Minimum total p/w over the hp sweep, per flight condition.
        p_w_min = np.amin(p_w, axis=1)

        # Recover the hp that achieves each minimum (last near-tie wins).
        hp_p_w_min = np.zeros(self.m)
        for i in range(1, self.m):
            for j in range(len(self.hp)):
                if p_w[i, j] - p_w_min[i] < 0.001:
                    # BUG FIX: report the actual grid value self.hp[j]; the
                    # original used j * 0.01, which drifts from the real grid
                    # whose spacing is (1 + 1/n)/n, not exactly 0.01.
                    hp_p_w_min[i] = self.hp[j]
        hp_p_w_min[0] = self.hp_threshold

        # The design point is the most demanding condition's minimum p/w.
        design_point = np.array([self.w_s, np.amax(p_w_min)])
        return hp_p_w_min, design_point
if __name__ == "__main__":
constrains = np.array([[0, 80, 1, 0.2], [0, 68, 0.988, 0.5], [11300, 230, 0.948, 0.8],
[11900, 230, 0.78, 0.8], [3000, 100,
0.984, 0.8], [0, 100, 0.984, 0.5],
[3000, 200, 0.975, 0.6], [7000, 230, 0.96, 0.7]])
h = constrains[:, 0]
v = constrains[:, 1]
beta = constrains[:, 2]
problem = Design_Point_Select_Strategy(h, v, beta, method=2)
hp_p_w_min, design_point = problem.strategy()
ic(hp_p_w_min, design_point)
| [
"libao@gatech.edu"
] | libao@gatech.edu |
255fd358de701401ad48cfbde9991b3438e745f9 | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/__init__.py | 66007c35b6922abe6c70648de62f9d14f5b7ff9c | [
"Apache-2.0"
] | permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 12,152 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
from . import undefined_subtlv
# NOTE: auto-generated pyangbind bindings.  The code below is preserved
# verbatim; only comments were added.  A second class with the same name,
# generated from the openconfig-network-instance-l2 YANG module, appears
# later in this file and shadows this definition at import time.
class undefined_subtlvs(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/undefined-subtlvs. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: This container describes undefined ISIS TLVs.
  """
  # The only data member is the name-mangled YANG list below; the other
  # slots are pyangbind bookkeeping attributes.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__undefined_subtlv',)
  _yang_name = 'undefined-subtlvs'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Keyed YANG list (key leaf: 'type') of undefined sub-TLVs; state data
    # only (is_config=False).
    self.__undefined_subtlv = YANGDynClass(base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
    load = kwargs.pop("load", None)
    # Copy-constructor path: a single positional argument is another
    # instance of this container whose changed elements are copied in via
    # the generated _set_* methods.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # Instance path within the data tree; falls back to the static schema
    # path when this object is not attached to a parent.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'levels', u'level', u'link-state-database', u'lsp', u'tlvs', u'tlv', u'isis-neighbor-attribute', u'neighbors', u'neighbor', u'undefined-subtlvs']
  def _get_undefined_subtlv(self):
    """
    Getter method for undefined_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv (list)
    YANG Description: Sub-TLVs that are not defined in the model or not recognised by
    system.
    """
    return self.__undefined_subtlv
  def _set_undefined_subtlv(self, v, load=False):
    """
    Setter method for undefined_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_undefined_subtlv is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_undefined_subtlv() directly.
    YANG Description: Sub-TLVs that are not defined in the model or not recognised by
    system.
    """
    # Normalise typed wrappers back to their underlying type before
    # re-wrapping in YANGDynClass; a failed coercion is reported as a
    # structured ValueError.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """undefined_subtlv must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
        })
    self.__undefined_subtlv = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_undefined_subtlv(self):
    # Reset the list to an empty default instance.
    self.__undefined_subtlv = YANGDynClass(base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
  # config: false in the YANG model, so only a getter is exposed publicly.
  undefined_subtlv = __builtin__.property(_get_undefined_subtlv)
  _pyangbind_elements = {'undefined_subtlv': undefined_subtlv, }
from . import undefined_subtlv
# NOTE: auto-generated pyangbind bindings for the openconfig-network-instance-l2
# variant of this container.  The code below is preserved verbatim; only
# comments were added.  This class intentionally redefines the identical
# 'undefined_subtlvs' generated earlier in this file and is the definition
# bound to the name at import time.
class undefined_subtlvs(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/undefined-subtlvs. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: This container describes undefined ISIS TLVs.
  """
  # The only data member is the name-mangled YANG list below; the other
  # slots are pyangbind bookkeeping attributes.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__undefined_subtlv',)
  _yang_name = 'undefined-subtlvs'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Keyed YANG list (key leaf: 'type') of undefined sub-TLVs; state data
    # only (is_config=False).
    self.__undefined_subtlv = YANGDynClass(base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
    load = kwargs.pop("load", None)
    # Copy-constructor path: a single positional argument is another
    # instance of this container whose changed elements are copied in via
    # the generated _set_* methods.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # Instance path within the data tree; falls back to the static schema
    # path when this object is not attached to a parent.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'levels', u'level', u'link-state-database', u'lsp', u'tlvs', u'tlv', u'isis-neighbor-attribute', u'neighbors', u'neighbor', u'undefined-subtlvs']
  def _get_undefined_subtlv(self):
    """
    Getter method for undefined_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv (list)
    YANG Description: Sub-TLVs that are not defined in the model or not recognised by
    system.
    """
    return self.__undefined_subtlv
  def _set_undefined_subtlv(self, v, load=False):
    """
    Setter method for undefined_subtlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_undefined_subtlv is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_undefined_subtlv() directly.
    YANG Description: Sub-TLVs that are not defined in the model or not recognised by
    system.
    """
    # Normalise typed wrappers back to their underlying type before
    # re-wrapping in YANGDynClass; a failed coercion is reported as a
    # structured ValueError.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """undefined_subtlv must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
        })
    self.__undefined_subtlv = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_undefined_subtlv(self):
    # Reset the list to an empty default instance.
    self.__undefined_subtlv = YANGDynClass(base=YANGListType("type",undefined_subtlv.undefined_subtlv, yang_name="undefined-subtlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="undefined-subtlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
  # config: false in the YANG model, so only a getter is exposed publicly.
  undefined_subtlv = __builtin__.property(_get_undefined_subtlv)
  _pyangbind_elements = {'undefined_subtlv': undefined_subtlv, }
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
2a89720d50718fac3136b6f618636ce92d82c4a3 | 65329299fca8dcf2e204132624d9b0f8f8f39af7 | /napalm_yang/models/openconfig/components/component/transceiver/physical_channels/channel/state/__init__.py | 64250fef640609e72f0af8836abb44ed220b07b4 | [
"Apache-2.0"
] | permissive | darylturner/napalm-yang | bf30420e22d8926efdc0705165ed0441545cdacf | b14946b884ad2019b896ee151285900c89653f44 | refs/heads/master | 2021-05-14T12:17:37.424659 | 2017-11-17T07:32:49 | 2017-11-17T07:32:49 | 116,404,171 | 0 | 0 | null | 2018-01-05T16:21:37 | 2018-01-05T16:21:36 | null | UTF-8 | Python | false | false | 27,624 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import output_power
import input_power
import laser_bias_current
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-platform - based on the path /components/component/transceiver/physical-channels/channel/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for channels
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__index','__description','__tx_laser','__target_output_power','__output_frequency','__output_power','__input_power','__laser_bias_current',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tx_laser = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="tx-laser", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='boolean', is_config=False)
self.__index = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'0..max']}), is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='uint16', is_config=False)
self.__laser_bias_current = YANGDynClass(base=laser_bias_current.laser_bias_current, is_container='container', yang_name="laser-bias-current", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)
self.__description = YANGDynClass(base=unicode, is_leaf=True, yang_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='string', is_config=False)
self.__output_power = YANGDynClass(base=output_power.output_power, is_container='container', yang_name="output-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)
self.__target_output_power = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="target-output-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='decimal64', is_config=False)
self.__output_frequency = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="output-frequency", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='oc-opt-types:frequency-type', is_config=False)
self.__input_power = YANGDynClass(base=input_power.input_power, is_container='container', yang_name="input-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'components', u'component', u'transceiver', u'physical-channels', u'channel', u'state']
def _get_index(self):
"""
Getter method for index, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/index (uint16)
YANG Description: Index of the physical channnel or lane within a physical
client port
"""
return self.__index
def _set_index(self, v, load=False):
"""
Setter method for index, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/index (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_index is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_index() directly.
YANG Description: Index of the physical channnel or lane within a physical
client port
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'0..max']}), is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """index must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'0..max']}), is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='uint16', is_config=False)""",
})
self.__index = t
if hasattr(self, '_set'):
self._set()
def _unset_index(self):
self.__index = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'0..max']}), is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='uint16', is_config=False)
def _get_description(self):
"""
Getter method for description, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/description (string)
YANG Description: Text description for the client physical channel
"""
return self.__description
def _set_description(self, v, load=False):
"""
Setter method for description, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/description (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_description is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_description() directly.
YANG Description: Text description for the client physical channel
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """description must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='string', is_config=False)""",
})
self.__description = t
if hasattr(self, '_set'):
self._set()
def _unset_description(self):
self.__description = YANGDynClass(base=unicode, is_leaf=True, yang_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='string', is_config=False)
def _get_tx_laser(self):
"""
Getter method for tx_laser, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/tx_laser (boolean)
YANG Description: Enable (true) or disable (false) the transmit label for the
channel
"""
return self.__tx_laser
def _set_tx_laser(self, v, load=False):
"""
Setter method for tx_laser, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/tx_laser (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_tx_laser is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tx_laser() directly.
YANG Description: Enable (true) or disable (false) the transmit label for the
channel
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="tx-laser", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tx_laser must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="tx-laser", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='boolean', is_config=False)""",
})
self.__tx_laser = t
if hasattr(self, '_set'):
self._set()
def _unset_tx_laser(self):
self.__tx_laser = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="tx-laser", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='boolean', is_config=False)
def _get_target_output_power(self):
"""
Getter method for target_output_power, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/target_output_power (decimal64)
YANG Description: Target output optical power level of the optical channel,
expressed in increments of 0.01 dBm (decibel-milliwats)
"""
return self.__target_output_power
def _set_target_output_power(self, v, load=False):
"""
Setter method for target_output_power, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/target_output_power (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_target_output_power is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_target_output_power() directly.
YANG Description: Target output optical power level of the optical channel,
expressed in increments of 0.01 dBm (decibel-milliwats)
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="target-output-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='decimal64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """target_output_power must be of a type compatible with decimal64""",
'defined-type': "decimal64",
'generated-type': """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="target-output-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='decimal64', is_config=False)""",
})
self.__target_output_power = t
if hasattr(self, '_set'):
self._set()
def _unset_target_output_power(self):
self.__target_output_power = YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="target-output-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='decimal64', is_config=False)
def _get_output_frequency(self):
"""
Getter method for output_frequency, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/output_frequency (oc-opt-types:frequency-type)
YANG Description: The frequency in MHz of the individual physical channel
(e.g. ITU C50 - 195.0THz and would be reported as
195,000,000 MHz in this model). This attribute is not
configurable on most client ports.
"""
return self.__output_frequency
def _set_output_frequency(self, v, load=False):
"""
Setter method for output_frequency, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/output_frequency (oc-opt-types:frequency-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_output_frequency is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_output_frequency() directly.
YANG Description: The frequency in MHz of the individual physical channel
(e.g. ITU C50 - 195.0THz and would be reported as
195,000,000 MHz in this model). This attribute is not
configurable on most client ports.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="output-frequency", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='oc-opt-types:frequency-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """output_frequency must be of a type compatible with oc-opt-types:frequency-type""",
'defined-type': "oc-opt-types:frequency-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="output-frequency", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='oc-opt-types:frequency-type', is_config=False)""",
})
self.__output_frequency = t
if hasattr(self, '_set'):
self._set()
def _unset_output_frequency(self):
self.__output_frequency = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="output-frequency", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='oc-opt-types:frequency-type', is_config=False)
def _get_output_power(self):
"""
Getter method for output_power, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/output_power (container)
YANG Description: The output optical power of this port in units of 0.01dBm.
If the port is an aggregate of multiple physical channels,
this attribute is the total power or sum of all channels.
Values include the instantaneous, average, minimum, and
maximum statistics. If avg/min/max statistics are not
supported, the target is expected to just supply the
instant value
"""
return self.__output_power
def _set_output_power(self, v, load=False):
"""
Setter method for output_power, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/output_power (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_output_power is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_output_power() directly.
YANG Description: The output optical power of this port in units of 0.01dBm.
If the port is an aggregate of multiple physical channels,
this attribute is the total power or sum of all channels.
Values include the instantaneous, average, minimum, and
maximum statistics. If avg/min/max statistics are not
supported, the target is expected to just supply the
instant value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=output_power.output_power, is_container='container', yang_name="output-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """output_power must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=output_power.output_power, is_container='container', yang_name="output-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)""",
})
self.__output_power = t
if hasattr(self, '_set'):
self._set()
def _unset_output_power(self):
self.__output_power = YANGDynClass(base=output_power.output_power, is_container='container', yang_name="output-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)
def _get_input_power(self):
"""
Getter method for input_power, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/input_power (container)
YANG Description: The input optical power of this port in units of 0.01dBm.
If the port is an aggregate of multiple physical channels,
this attribute is the total power or sum of all channels.
Values include the instantaneous, average, minimum, and
maximum statistics. If avg/min/max statistics are not
supported, the target is expected to just supply the
instant value
"""
return self.__input_power
  def _set_input_power(self, v, load=False):
    """
    Setter method for input_power, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/input_power (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_input_power is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_input_power() directly.

    YANG Description: The input optical power of this port in units of 0.01dBm.
If the port is an aggregate of multiple physical channels,
this attribute is the total power or sum of all channels.
Values include the instantaneous, average, minimum, and
maximum statistics.  If avg/min/max statistics are not
supported, the target is expected to just supply the
instant value
    """
    # NOTE: `load` is part of the generated setter signature; it is unused in this body.
    # If v is a pyangbind-typed value, convert it via its _utype before re-validating.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value in the generated container class; raises if incompatible.
      t = YANGDynClass(v,base=input_power.input_power, is_container='container', yang_name="input-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected generated type.
      raise ValueError({
          'error-string': """input_power must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=input_power.input_power, is_container='container', yang_name="input-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)""",
        })
    self.__input_power = t
    # Propagate the change to the parent tree when a _set hook is present.
    if hasattr(self, '_set'):
      self._set()
  def _unset_input_power(self):
    """Replace the stored input_power value with a freshly constructed, empty generated container."""
    self.__input_power = YANGDynClass(base=input_power.input_power, is_container='container', yang_name="input-power", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)
  def _get_laser_bias_current(self):
    """
    Getter method for laser_bias_current, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/laser_bias_current (container)

    YANG Description: The current applied by the system to the transmit laser to
achieve the output power.  The current is expressed in mA
with up to two decimal precision. Values include the
instantaneous, average, minimum, and maximum statistics.
If avg/min/max statistics are not supported, the target is
expected to just supply the instant value
    """
    # Generated read accessor; the value is stored name-mangled on the instance.
    return self.__laser_bias_current
  def _set_laser_bias_current(self, v, load=False):
    """
    Setter method for laser_bias_current, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/laser_bias_current (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_laser_bias_current is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_laser_bias_current() directly.

    YANG Description: The current applied by the system to the transmit laser to
achieve the output power.  The current is expressed in mA
with up to two decimal precision. Values include the
instantaneous, average, minimum, and maximum statistics.
If avg/min/max statistics are not supported, the target is
expected to just supply the instant value
    """
    # NOTE: `load` is part of the generated setter signature; it is unused in this body.
    # If v is a pyangbind-typed value, convert it via its _utype before re-validating.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value in the generated container class; raises if incompatible.
      t = YANGDynClass(v,base=laser_bias_current.laser_bias_current, is_container='container', yang_name="laser-bias-current", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected generated type.
      raise ValueError({
          'error-string': """laser_bias_current must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=laser_bias_current.laser_bias_current, is_container='container', yang_name="laser-bias-current", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)""",
        })
    self.__laser_bias_current = t
    # Propagate the change to the parent tree when a _set hook is present.
    if hasattr(self, '_set'):
      self._set()
  def _unset_laser_bias_current(self):
    """Replace the stored laser_bias_current value with a freshly constructed, empty generated container."""
    self.__laser_bias_current = YANGDynClass(base=laser_bias_current.laser_bias_current, is_container='container', yang_name="laser-bias-current", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/platform/transceiver', defining_module='openconfig-platform-transceiver', yang_type='container', is_config=False)
index = __builtin__.property(_get_index)
description = __builtin__.property(_get_description)
tx_laser = __builtin__.property(_get_tx_laser)
target_output_power = __builtin__.property(_get_target_output_power)
output_frequency = __builtin__.property(_get_output_frequency)
output_power = __builtin__.property(_get_output_power)
input_power = __builtin__.property(_get_input_power)
laser_bias_current = __builtin__.property(_get_laser_bias_current)
_pyangbind_elements = {'index': index, 'description': description, 'tx_laser': tx_laser, 'target_output_power': target_output_power, 'output_frequency': output_frequency, 'output_power': output_power, 'input_power': input_power, 'laser_bias_current': laser_bias_current, }
| [
"noreply@github.com"
] | darylturner.noreply@github.com |
3c8f27457cd7e7975d7b68a101cbdd624eb043d1 | 73c2716fc72d0a389f14f21a5de73da818b54dc4 | /udemy-recipe-api/app/user/views.py | 75c415e65ee1bdfb16165a1ebbd5bc9b67e3722d | [
"MIT"
] | permissive | washimimizuku/django-tutorials | e13a429aa43cee24d84466d4cf3f22c518b17673 | 4f0e3836778dd3ea5403ef713e2f6777e44eae8d | refs/heads/main | 2023-06-04T05:47:16.863511 | 2021-06-15T13:38:37 | 2021-06-15T13:38:37 | 363,867,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
    """Create a new user in the system (DRF generic create endpoint)."""
    # Request validation and object creation are delegated to the serializer.
    serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
    """Create a new auth token for the user (POST credentials, receive a token)."""
    serializer_class = AuthTokenSerializer
    # Honour the project's configured default renderers for this endpoint.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
    """Retrieve/update endpoint that always operates on the authenticated user."""
    serializer_class = UserSerializer
    # Token-based authentication only; anonymous requests are rejected.
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)
    def get_object(self):
        """Return the authenticated user instead of looking up a pk from the URL."""
        return self.request.user
| [
"nuno.barreto@inventsys.ch"
] | nuno.barreto@inventsys.ch |
570549c76e261ef39c724067588e6f90a8bd18af | 57d1580fd540b4819abb67f9db43fdfbba63725f | /lib/var.py | 37390233f5572cf38a87bfd5b95f5894ff38f029 | [] | no_license | glyfish/alpaca | 49edfcb9d80551825dfa4cf071f21aeb95a3502f | 2b5b69bcf50ed081a526742658be503706af94b4 | refs/heads/master | 2023-02-22T00:24:19.293502 | 2022-09-05T17:20:23 | 2022-09-05T17:20:23 | 186,169,438 | 1 | 3 | null | 2023-02-11T00:52:12 | 2019-05-11T18:38:58 | Python | UTF-8 | Python | false | false | 7,328 | py | import numpy
from matplotlib import pyplot
from lib import config
def multivariate_normal_sample(μ, Ω, n):
    """Draw n samples from N(μ, Ω); returns an (n, len(μ)) array."""
    return numpy.random.multivariate_normal(mean=μ, cov=Ω, size=n)
def timeseries_plot(samples, tmax, ylabel, title, plot_name):
    """Plot each row of `samples` as its own time-series panel and save the figure.

    samples   : (nplot, nsample) array, one series per row
    tmax      : number of leading time steps to plot
    ylabel    : sequence of per-panel y-axis labels
    title     : figure title (drawn on the top panel)
    plot_name : asset name passed to config.save_post_asset
    NOTE(review): axis is indexed (axis[0], axis[i]) — this assumes nplot >= 2,
    since pyplot.subplots(1) returns a lone Axes; confirm callers never pass a
    single-row array.
    """
    nplot, nsample = samples.shape
    # Shared y-limits across panels so the series are visually comparable.
    ymin = numpy.amin(samples)
    ymax = numpy.amax(samples)
    figure, axis = pyplot.subplots(nplot, sharex=True, figsize=(12, 9))
    axis[0].set_title(title)
    axis[nplot-1].set_xlabel(r"$t$")
    time = numpy.linspace(0, tmax-1, tmax)
    for i in range(nplot):
        # Per-panel mean/standard-deviation annotation box.
        stats=f"μ={format(numpy.mean(samples[i]), '2.2f')}\nσ={format(numpy.std(samples[i]), '2.2f')}"
        bbox = dict(boxstyle='square,pad=1', facecolor="#FEFCEC", edgecolor="#FEFCEC", alpha=0.75)
        axis[i].text(0.05, 0.75, stats, fontsize=15, bbox=bbox, transform=axis[i].transAxes)
        axis[i].set_ylabel(ylabel[i])
        axis[i].set_ylim([ymin, ymax])
        axis[i].set_xlim([0.0, tmax])
        axis[i].plot(time, samples[i,:tmax], lw=1.0)
    config.save_post_asset(figure, "mean_reversion", plot_name)
def autocorrelation_plot(title, samples, γt, ylim, plot):
    """Overlay the sample autocorrelation of `samples` with the theoretical
    curve γt (one point per lag) and save the figure.

    The number of plotted lags is len(γt).
    """
    max_lag = len(γt)
    figure, axis = pyplot.subplots(figsize=(10, 7))
    axis.set_title(title)
    axis.set_ylabel(r"$\gamma_{\tau}$")
    axis.set_xlabel("Time Lag (τ)")
    axis.set_xlim([-1.0, max_lag])
    axis.set_ylim(ylim)
    ac = autocorrelation(samples)
    # autocorrelation() returns a complex array (FFT round-trip); only the
    # real part is plotted.
    axis.plot(range(max_lag), numpy.real(ac[:max_lag]), marker='o', markersize=10.0, linestyle="None", markeredgewidth=1.0, alpha=0.75, label="Simulation", zorder=6)
    axis.plot(range(max_lag), γt, lw="2", label=r"$γ_{\tau}$", zorder=5)
    axis.legend(fontsize=16)
    config.save_post_asset(figure, "mean_reversion", plot)
def cross_correlation_plot(title, x, y, γt, ylim, plot):
    """Overlay the sample cross-correlation of x and y with the theoretical
    curve γt (one point per lag) and save the figure.

    The number of plotted lags is len(γt).
    """
    max_lag = len(γt)
    figure, axis = pyplot.subplots(figsize=(10, 7))
    axis.set_title(title)
    axis.set_ylabel(r"$\gamma_{\tau}$")
    axis.set_xlabel("Time Lag (τ)")
    cc = cross_correlation(x, y)
    axis.set_xlim([-1.0, max_lag])
    axis.set_ylim(ylim)
    # cross_correlation() returns a complex array; only the real part is plotted.
    axis.plot(range(max_lag), numpy.real(cc[:max_lag]), marker='o', markersize=10.0, linestyle="None", markeredgewidth=1.0, alpha=0.75, label="Simulation", zorder=6)
    axis.plot(range(max_lag), γt, lw="2", label=r"$γ_{\tau}$", zorder=5)
    axis.legend(fontsize=16)
    config.save_post_asset(figure, "mean_reversion", plot)
def plot_data_frame(df, tmax, plot_name):
    """Plot the first `tmax` rows of every column of `df`, one subplot per
    column, and save the figure.

    Fixes over the previous version:
    - odd column counts above 4 no longer drop the last column (the grid is
      now rounded up and unused subplots are hidden);
    - a single-column frame no longer crashes (`subplots` returns a lone Axes
      there, which has no `.flatten()`).
    """
    _, nplot = df.shape
    if nplot > 4:
        nrows = (nplot + 1) // 2  # round up so odd counts keep every column
        ncols = 2
    else:
        nrows = nplot
        ncols = 1
    figure, axes = pyplot.subplots(nrows=nrows, ncols=ncols, figsize=(10, 8))
    # atleast_1d copes with the single-Axes return value when nrows*ncols == 1.
    for i, ax in enumerate(numpy.atleast_1d(axes).flatten()):
        if i >= nplot:
            ax.set_visible(False)  # hide the spare subplot of an odd grid
            continue
        data = df[df.columns[i]]
        ax.plot(data[:tmax], lw=1)
        ax.set_title(df.columns[i], fontsize=12)
        ax.tick_params(axis="x", labelsize=10)
        ax.tick_params(axis="y", labelsize=10)
    pyplot.tight_layout(pad=1.0)
    config.save_post_asset(figure, "mean_reversion", plot_name)
def time_series_to_data_frame(columns, series):
    """Bundle parallel sequences of column names and series into a DataFrame.

    columns : sequence of column labels
    series  : sequence of equal-length 1-D series, one per column
    Returns a pandas.DataFrame with one column per label.
    """
    # Fix: pandas is never imported at this module's top level, so the
    # original body raised NameError at call time. Import locally to keep the
    # module importable for callers that never use this helper.
    import pandas
    data = {columns[i]: series[i] for i in range(len(columns))}
    return pandas.DataFrame(data)
def var_simulate(x0, μ, φ, Ω, n):
    """Simulate n steps of a VAR(l) process x_t = Σ_j φ_{j+1} x_{t-j-1} + ε_t.

    x0 : (m, l) array of initial values, one column per lag
    μ  : length-m innovation mean
    φ  : (l, m, m) array of lag coefficient matrices
    Ω  : (m, m) innovation covariance
    n  : total number of time steps (including the l initial columns)
    Returns an (m, n) array whose first l columns are x0.
    """
    m, l = x0.shape
    xt = numpy.zeros((m, n))
    # One innovation vector per step; the first l draws are never used but
    # keep the RNG call pattern fixed.
    ε = multivariate_normal_sample(μ, Ω, n)
    for i in range(l):
        xt[:,i] = x0[:,i]
    for i in range(l, n):
        xt[:,i] = ε[i]
        for j in range(l):
            # Add φ_{j+1} · x_{t-j-1}; squeeze back to a flat column.
            t1 = φ[j]*numpy.matrix(xt[:,i-j-1]).T
            t2 = numpy.squeeze(numpy.array(t1), axis=1)
            xt[:,i] += t2
    return xt
def phi_companion_form(φ):
    """Build the companion-form coefficient matrix from the (l, n, n) array of
    lag matrices φ: the lag blocks [φ₁ … φ_l] form the top block-row and
    shifted identity blocks fill the rows below.

    NOTE(review): the lower block-rows are generated by `range(1, n)` (the
    variable count) rather than `range(1, l)` (the lag count), so the result
    is square only when l == n. The sibling mean/omega companion forms are
    also sized n², so the whole module shares this assumption — confirm
    l == n is intended before reusing elsewhere.
    """
    l, n, _ = φ.shape
    # Top block-row: the lag matrices concatenated side by side.
    p = φ[0]
    for i in range(1,l):
        p = numpy.concatenate((p, φ[i]), axis=1)
    # Remaining block-rows: an identity block at position i-1, zeros elsewhere,
    # implementing the lag shift.
    for i in range(1, n):
        if i == 1:
            r = numpy.eye(n)
        else:
            r = numpy.zeros((n, n))
        for j in range(1,l):
            if j == i - 1:
                r = numpy.concatenate((r, numpy.eye(n)), axis=1)
            else:
                r = numpy.concatenate((r, numpy.zeros((n, n))), axis=1)
        p = numpy.concatenate((p, r), axis=0)
    return numpy.matrix(p)
def mean_companion_form(μ):
    """Embed the mean vector μ into companion form: an (n², 1) column whose
    first n entries are μ and whose remaining entries are zero."""
    n = len(μ)
    head = numpy.asarray(μ, dtype=float)
    padded = numpy.concatenate((head, numpy.zeros(n**2 - n)))
    return numpy.matrix(padded).T
def omega_companion_form(ω):
    """Embed the (n, n) innovation covariance ω into the (n², n²) companion
    form: ω occupies the top-left block, everything else is zero."""
    n = ω.shape[0]
    pad = n**2 - n
    embedded = numpy.pad(numpy.asarray(ω, dtype=float), ((0, pad), (0, pad)))
    return numpy.matrix(embedded)
def vec(m):
    """Stack the columns of the square matrix m into one (n², 1) column vector."""
    # Column-major (Fortran-order) ravel is exactly column stacking.
    flat = numpy.ravel(numpy.asarray(m, dtype=float), order="F")
    return numpy.matrix(flat).T
def unvec(v):
    """Inverse of vec: reshape an (n², 1) column vector back into the (n, n)
    matrix whose columns it stacked."""
    n = int(numpy.sqrt(v.shape[0]))
    flat = numpy.asarray(v, dtype=float).ravel()
    # Fortran-order reshape refills the matrix column by column.
    return numpy.matrix(flat.reshape((n, n), order="F"))
def stationary_mean(φ, μ):
    """Stationary mean of the companion-form VAR: (I - Φ)⁻¹ Μ."""
    Φ = phi_companion_form(φ)
    Μ = mean_companion_form(μ)
    dim = Φ.shape[0]
    identity = numpy.matrix(numpy.identity(dim))
    return numpy.linalg.inv(identity - Φ) * Μ
def stationary_covariance_matrix(φ, ω):
    """Stationary covariance of the companion-form VAR via the identity
    vec(Σ) = (I - Φ⊗Φ)⁻¹ vec(Ω)."""
    Ω = omega_companion_form(ω)
    Φ = phi_companion_form(φ)
    dim = Φ.shape[0]
    identity = numpy.matrix(numpy.eye(dim**2))
    inverse = numpy.linalg.inv(identity - numpy.kron(Φ, Φ))
    return unvec(inverse * vec(Ω))
def stationary_autocovariance_matrix(φ, ω, n):
    """First n autocovariance matrices of the companion-form VAR.

    Powers of the companion matrix Φ are accumulated iteratively and each is
    combined with the stationary covariance Σ as Σ·(Φ^τ)ᵀ.

    φ : (l, m, m) lag coefficient matrices
    ω : (m, m) innovation covariance
    n : number of lags to return
    Returns an (n, dim, dim) array, dim being the companion dimension.

    Fix: removed the unused local `t = numpy.linspace(0, n-1, n)`.
    """
    Φ = phi_companion_form(φ)
    Σ = stationary_covariance_matrix(φ, ω)
    l, _ = Φ.shape
    γ = numpy.zeros((n, l, l))
    γ[0] = numpy.matrix(numpy.eye(l))
    # Build Φ^i iteratively (γ[i] temporarily holds Φ^i) ...
    for i in range(1,n):
        γ[i] = γ[i-1]*Φ
    # ... then replace each power with Σ·(Φ^i)ᵀ.
    for i in range(n):
        γ[i] = Σ*γ[i].T
    return γ
def eigen_values(φ):
    """Eigenvalues of the companion matrix (the process is stationary iff all
    eigenvalues lie inside the unit circle)."""
    companion = phi_companion_form(φ)
    values, _ = numpy.linalg.eig(companion)
    return values
def autocorrelation(x):
    """FFT-based sample autocorrelation of x at lags 0..n-1, normalised so the
    lag-0 value is 1. Returns a complex array (imaginary parts are round-off)."""
    n = len(x)
    centered = x - x.mean()
    # fft's size argument zero-pads to 2n-1, making the circular correlation linear.
    spectrum = numpy.fft.fft(centered, 2 * n - 1)
    power = numpy.conj(spectrum) * spectrum
    raw = numpy.fft.ifft(power)
    return raw[:n] / raw[0]
def cross_correlation(x, y):
    """FFT-based sample cross-correlation of x and y at lags 0..n-1, divided
    by n. Returns a complex array (imaginary parts are round-off)."""
    n = len(x)
    size = 2 * n - 1
    # fft's size argument zero-pads, making the circular correlation linear.
    x_fft = numpy.fft.fft(x - x.mean(), size)
    y_fft = numpy.fft.fft(y - y.mean(), size)
    raw = numpy.fft.ifft(numpy.conj(x_fft) * y_fft)
    return raw[:n] / float(n)
def yt_parameter_estimation_form(xt):
    """Stack the l lagged observation blocks of xt vertically: block k holds
    xt shifted by k columns, giving the regressor matrix for the VAR fit."""
    l, n = xt.shape
    blocks = [xt[:, l - k:n - k] for k in range(1, l + 1)]
    return numpy.concatenate(blocks, axis=0)
def theta_parameter_estimation(xt):
    """Least-squares estimate of the stacked VAR coefficients [φ₁ … φ_l]:
    accumulates Σ x_t y_tᵀ and Σ y_t y_tᵀ over the sample (y_t stacking the
    lagged observations) and returns (Σ x yᵀ)(Σ y yᵀ)⁻¹."""
    l, n = xt.shape
    lagged = yt_parameter_estimation_form(xt)
    m = lagged.shape[0]
    yy_sum = numpy.matrix(numpy.zeros((m, m)))
    xy_sum = numpy.matrix(numpy.zeros((l, m)))
    for t in range(l, n):
        x_col = numpy.matrix(xt[:, t]).T
        y_col = numpy.matrix(lagged[:, t - l]).T
        yy_sum += y_col * y_col.T
        xy_sum += x_col * y_col.T
    return xy_sum * numpy.linalg.inv(yy_sum)
def split_theta(theta):
    """Split the stacked coefficient matrix [φ₁ φ₂ … φ_l] into its l
    equally-wide lag blocks (a list of column slices)."""
    return numpy.hsplit(theta, theta.shape[0])
def omega_parameter_estimation(xt, theta):
    """Residual covariance estimate Σ (x_t − θ y_t)(x_t − θ y_t)ᵀ / (n − l),
    where y_t stacks the lagged observations and θ is the fitted coefficient
    matrix from theta_parameter_estimation."""
    l, n = xt.shape
    lagged = yt_parameter_estimation_form(xt)
    residual_sum = numpy.matrix(numpy.zeros((l, l)))
    for t in range(l, n):
        x_col = numpy.matrix(xt[:, t]).T
        y_col = numpy.matrix(lagged[:, t - l]).T
        residual = x_col - theta * y_col
        residual_sum += residual * residual.T
    return residual_sum / float(n - l)
| [
"troy.stribling@gmail.com"
] | troy.stribling@gmail.com |
6d45a0aafcf85c8215157029bef4318a5c2d0836 | 7ab41799fd38489c93282f1beb3b20e7ef8ff165 | /python/79.py | baa33400e8230c73e0d6ae18a6d3a474fff1fc2d | [] | no_license | scturtle/leetcode-sol | 86c4095df6b31a9fcad683f2d63669ce1691633c | e1a9ce5d9b8fe4bd11e50bd1d5ba1933de845db7 | refs/heads/master | 2020-04-23T00:01:37.016267 | 2015-11-21T04:15:27 | 2015-11-21T04:15:27 | 32,385,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | class Solution(object):
@staticmethod
def search(board, word, idx, i, j, vit):
if idx == len(word):
return True
if i < 0 or i >= len(board) or\
j < 0 or j >= len(board[0]) or\
board[i][j] != word[idx]:
return False
vit.add((i, j))
for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
ni, nj = i+di, j+dj
if (ni, nj) not in vit:
if Solution.search(board, word, idx+1, ni, nj, vit):
return True
vit.remove((i, j))
return False
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
n = len(board)
if not n:
return False
m = len(board[0])
if not m:
return False
vit = set()
for i, j in itertools.product(range(n), range(m)):
if board[i][j] == word[0]:
if Solution.search(board, word, 0, i, j, vit):
return True
return False
| [
"scturtle@gmail.com"
] | scturtle@gmail.com |
8977ece66055df7039de24142ea83c6814014521 | e57d7785276053332c633b57f6925c90ad660580 | /sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/_synapse_management_client.py | a9e7c48810f68e60ab64fbfda462bd5da4aca701 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 30,140 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import SynapseManagementClientConfiguration
from .operations import AzureADOnlyAuthenticationsOperations
from .operations import Operations
from .operations import IpFirewallRulesOperations
from .operations import KeysOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import PrivateLinkResourcesOperations
from .operations import PrivateLinkHubPrivateLinkResourcesOperations
from .operations import PrivateLinkHubsOperations
from .operations import PrivateEndpointConnectionsPrivateLinkHubOperations
from .operations import SqlPoolsOperations
from .operations import SqlPoolMetadataSyncConfigsOperations
from .operations import SqlPoolOperationResultsOperations
from .operations import SqlPoolGeoBackupPoliciesOperations
from .operations import SqlPoolDataWarehouseUserActivitiesOperations
from .operations import SqlPoolRestorePointsOperations
from .operations import SqlPoolReplicationLinksOperations
from .operations import SqlPoolMaintenanceWindowsOperations
from .operations import SqlPoolMaintenanceWindowOptionsOperations
from .operations import SqlPoolTransparentDataEncryptionsOperations
from .operations import SqlPoolBlobAuditingPoliciesOperations
from .operations import SqlPoolOperationsOperations
from .operations import SqlPoolUsagesOperations
from .operations import SqlPoolSensitivityLabelsOperations
from .operations import SqlPoolRecommendedSensitivityLabelsOperations
from .operations import SqlPoolSchemasOperations
from .operations import SqlPoolTablesOperations
from .operations import SqlPoolTableColumnsOperations
from .operations import SqlPoolConnectionPoliciesOperations
from .operations import SqlPoolVulnerabilityAssessmentsOperations
from .operations import SqlPoolVulnerabilityAssessmentScansOperations
from .operations import SqlPoolSecurityAlertPoliciesOperations
from .operations import SqlPoolVulnerabilityAssessmentRuleBaselinesOperations
from .operations import ExtendedSqlPoolBlobAuditingPoliciesOperations
from .operations import DataMaskingPoliciesOperations
from .operations import DataMaskingRulesOperations
from .operations import SqlPoolColumnsOperations
from .operations import SqlPoolWorkloadGroupOperations
from .operations import SqlPoolWorkloadClassifierOperations
from .operations import WorkspaceManagedSqlServerBlobAuditingPoliciesOperations
from .operations import WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesOperations
from .operations import WorkspaceManagedSqlServerSecurityAlertPolicyOperations
from .operations import WorkspaceManagedSqlServerVulnerabilityAssessmentsOperations
from .operations import WorkspaceManagedSqlServerEncryptionProtectorOperations
from .operations import WorkspaceManagedSqlServerUsagesOperations
from .operations import WorkspaceManagedSqlServerRecoverableSqlPoolsOperations
from .operations import WorkspacesOperations
from .operations import WorkspaceAadAdminsOperations
from .operations import WorkspaceSqlAadAdminsOperations
from .operations import WorkspaceManagedIdentitySqlControlSettingsOperations
from .operations import RestorableDroppedSqlPoolsOperations
from .operations import BigDataPoolsOperations
from .operations import LibraryOperations
from .operations import LibrariesOperations
from .operations import IntegrationRuntimesOperations
from .operations import IntegrationRuntimeNodeIpAddressOperations
from .operations import IntegrationRuntimeObjectMetadataOperations
from .operations import IntegrationRuntimeNodesOperations
from .operations import IntegrationRuntimeCredentialsOperations
from .operations import IntegrationRuntimeConnectionInfosOperations
from .operations import IntegrationRuntimeAuthKeysOperations
from .operations import IntegrationRuntimeMonitoringDataOperations
from .operations import IntegrationRuntimeStatusOperations
from .operations import SparkConfigurationOperations
from .operations import SparkConfigurationsOperations
from . import models
class SynapseManagementClient(object):
"""Azure Synapse Analytics Management Client.
:ivar azure_ad_only_authentications: AzureADOnlyAuthenticationsOperations operations
:vartype azure_ad_only_authentications: azure.mgmt.synapse.operations.AzureADOnlyAuthenticationsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.synapse.operations.Operations
:ivar ip_firewall_rules: IpFirewallRulesOperations operations
:vartype ip_firewall_rules: azure.mgmt.synapse.operations.IpFirewallRulesOperations
:ivar keys: KeysOperations operations
:vartype keys: azure.mgmt.synapse.operations.KeysOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.synapse.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.synapse.operations.PrivateLinkResourcesOperations
:ivar private_link_hub_private_link_resources: PrivateLinkHubPrivateLinkResourcesOperations operations
:vartype private_link_hub_private_link_resources: azure.mgmt.synapse.operations.PrivateLinkHubPrivateLinkResourcesOperations
:ivar private_link_hubs: PrivateLinkHubsOperations operations
:vartype private_link_hubs: azure.mgmt.synapse.operations.PrivateLinkHubsOperations
:ivar private_endpoint_connections_private_link_hub: PrivateEndpointConnectionsPrivateLinkHubOperations operations
:vartype private_endpoint_connections_private_link_hub: azure.mgmt.synapse.operations.PrivateEndpointConnectionsPrivateLinkHubOperations
:ivar sql_pools: SqlPoolsOperations operations
:vartype sql_pools: azure.mgmt.synapse.operations.SqlPoolsOperations
:ivar sql_pool_metadata_sync_configs: SqlPoolMetadataSyncConfigsOperations operations
:vartype sql_pool_metadata_sync_configs: azure.mgmt.synapse.operations.SqlPoolMetadataSyncConfigsOperations
:ivar sql_pool_operation_results: SqlPoolOperationResultsOperations operations
:vartype sql_pool_operation_results: azure.mgmt.synapse.operations.SqlPoolOperationResultsOperations
:ivar sql_pool_geo_backup_policies: SqlPoolGeoBackupPoliciesOperations operations
:vartype sql_pool_geo_backup_policies: azure.mgmt.synapse.operations.SqlPoolGeoBackupPoliciesOperations
:ivar sql_pool_data_warehouse_user_activities: SqlPoolDataWarehouseUserActivitiesOperations operations
:vartype sql_pool_data_warehouse_user_activities: azure.mgmt.synapse.operations.SqlPoolDataWarehouseUserActivitiesOperations
:ivar sql_pool_restore_points: SqlPoolRestorePointsOperations operations
:vartype sql_pool_restore_points: azure.mgmt.synapse.operations.SqlPoolRestorePointsOperations
:ivar sql_pool_replication_links: SqlPoolReplicationLinksOperations operations
:vartype sql_pool_replication_links: azure.mgmt.synapse.operations.SqlPoolReplicationLinksOperations
:ivar sql_pool_maintenance_windows: SqlPoolMaintenanceWindowsOperations operations
:vartype sql_pool_maintenance_windows: azure.mgmt.synapse.operations.SqlPoolMaintenanceWindowsOperations
:ivar sql_pool_maintenance_window_options: SqlPoolMaintenanceWindowOptionsOperations operations
:vartype sql_pool_maintenance_window_options: azure.mgmt.synapse.operations.SqlPoolMaintenanceWindowOptionsOperations
:ivar sql_pool_transparent_data_encryptions: SqlPoolTransparentDataEncryptionsOperations operations
:vartype sql_pool_transparent_data_encryptions: azure.mgmt.synapse.operations.SqlPoolTransparentDataEncryptionsOperations
:ivar sql_pool_blob_auditing_policies: SqlPoolBlobAuditingPoliciesOperations operations
:vartype sql_pool_blob_auditing_policies: azure.mgmt.synapse.operations.SqlPoolBlobAuditingPoliciesOperations
:ivar sql_pool_operations: SqlPoolOperationsOperations operations
:vartype sql_pool_operations: azure.mgmt.synapse.operations.SqlPoolOperationsOperations
:ivar sql_pool_usages: SqlPoolUsagesOperations operations
:vartype sql_pool_usages: azure.mgmt.synapse.operations.SqlPoolUsagesOperations
:ivar sql_pool_sensitivity_labels: SqlPoolSensitivityLabelsOperations operations
:vartype sql_pool_sensitivity_labels: azure.mgmt.synapse.operations.SqlPoolSensitivityLabelsOperations
:ivar sql_pool_recommended_sensitivity_labels: SqlPoolRecommendedSensitivityLabelsOperations operations
:vartype sql_pool_recommended_sensitivity_labels: azure.mgmt.synapse.operations.SqlPoolRecommendedSensitivityLabelsOperations
:ivar sql_pool_schemas: SqlPoolSchemasOperations operations
:vartype sql_pool_schemas: azure.mgmt.synapse.operations.SqlPoolSchemasOperations
:ivar sql_pool_tables: SqlPoolTablesOperations operations
:vartype sql_pool_tables: azure.mgmt.synapse.operations.SqlPoolTablesOperations
:ivar sql_pool_table_columns: SqlPoolTableColumnsOperations operations
:vartype sql_pool_table_columns: azure.mgmt.synapse.operations.SqlPoolTableColumnsOperations
:ivar sql_pool_connection_policies: SqlPoolConnectionPoliciesOperations operations
:vartype sql_pool_connection_policies: azure.mgmt.synapse.operations.SqlPoolConnectionPoliciesOperations
:ivar sql_pool_vulnerability_assessments: SqlPoolVulnerabilityAssessmentsOperations operations
:vartype sql_pool_vulnerability_assessments: azure.mgmt.synapse.operations.SqlPoolVulnerabilityAssessmentsOperations
:ivar sql_pool_vulnerability_assessment_scans: SqlPoolVulnerabilityAssessmentScansOperations operations
:vartype sql_pool_vulnerability_assessment_scans: azure.mgmt.synapse.operations.SqlPoolVulnerabilityAssessmentScansOperations
:ivar sql_pool_security_alert_policies: SqlPoolSecurityAlertPoliciesOperations operations
:vartype sql_pool_security_alert_policies: azure.mgmt.synapse.operations.SqlPoolSecurityAlertPoliciesOperations
:ivar sql_pool_vulnerability_assessment_rule_baselines: SqlPoolVulnerabilityAssessmentRuleBaselinesOperations operations
:vartype sql_pool_vulnerability_assessment_rule_baselines: azure.mgmt.synapse.operations.SqlPoolVulnerabilityAssessmentRuleBaselinesOperations
:ivar extended_sql_pool_blob_auditing_policies: ExtendedSqlPoolBlobAuditingPoliciesOperations operations
:vartype extended_sql_pool_blob_auditing_policies: azure.mgmt.synapse.operations.ExtendedSqlPoolBlobAuditingPoliciesOperations
:ivar data_masking_policies: DataMaskingPoliciesOperations operations
:vartype data_masking_policies: azure.mgmt.synapse.operations.DataMaskingPoliciesOperations
:ivar data_masking_rules: DataMaskingRulesOperations operations
:vartype data_masking_rules: azure.mgmt.synapse.operations.DataMaskingRulesOperations
:ivar sql_pool_columns: SqlPoolColumnsOperations operations
:vartype sql_pool_columns: azure.mgmt.synapse.operations.SqlPoolColumnsOperations
:ivar sql_pool_workload_group: SqlPoolWorkloadGroupOperations operations
:vartype sql_pool_workload_group: azure.mgmt.synapse.operations.SqlPoolWorkloadGroupOperations
:ivar sql_pool_workload_classifier: SqlPoolWorkloadClassifierOperations operations
:vartype sql_pool_workload_classifier: azure.mgmt.synapse.operations.SqlPoolWorkloadClassifierOperations
:ivar workspace_managed_sql_server_blob_auditing_policies: WorkspaceManagedSqlServerBlobAuditingPoliciesOperations operations
:vartype workspace_managed_sql_server_blob_auditing_policies: azure.mgmt.synapse.operations.WorkspaceManagedSqlServerBlobAuditingPoliciesOperations
:ivar workspace_managed_sql_server_extended_blob_auditing_policies: WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesOperations operations
:vartype workspace_managed_sql_server_extended_blob_auditing_policies: azure.mgmt.synapse.operations.WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesOperations
:ivar workspace_managed_sql_server_security_alert_policy: WorkspaceManagedSqlServerSecurityAlertPolicyOperations operations
:vartype workspace_managed_sql_server_security_alert_policy: azure.mgmt.synapse.operations.WorkspaceManagedSqlServerSecurityAlertPolicyOperations
:ivar workspace_managed_sql_server_vulnerability_assessments: WorkspaceManagedSqlServerVulnerabilityAssessmentsOperations operations
:vartype workspace_managed_sql_server_vulnerability_assessments: azure.mgmt.synapse.operations.WorkspaceManagedSqlServerVulnerabilityAssessmentsOperations
:ivar workspace_managed_sql_server_encryption_protector: WorkspaceManagedSqlServerEncryptionProtectorOperations operations
:vartype workspace_managed_sql_server_encryption_protector: azure.mgmt.synapse.operations.WorkspaceManagedSqlServerEncryptionProtectorOperations
:ivar workspace_managed_sql_server_usages: WorkspaceManagedSqlServerUsagesOperations operations
:vartype workspace_managed_sql_server_usages: azure.mgmt.synapse.operations.WorkspaceManagedSqlServerUsagesOperations
:ivar workspace_managed_sql_server_recoverable_sql_pools: WorkspaceManagedSqlServerRecoverableSqlPoolsOperations operations
:vartype workspace_managed_sql_server_recoverable_sql_pools: azure.mgmt.synapse.operations.WorkspaceManagedSqlServerRecoverableSqlPoolsOperations
:ivar workspaces: WorkspacesOperations operations
:vartype workspaces: azure.mgmt.synapse.operations.WorkspacesOperations
:ivar workspace_aad_admins: WorkspaceAadAdminsOperations operations
:vartype workspace_aad_admins: azure.mgmt.synapse.operations.WorkspaceAadAdminsOperations
:ivar workspace_sql_aad_admins: WorkspaceSqlAadAdminsOperations operations
:vartype workspace_sql_aad_admins: azure.mgmt.synapse.operations.WorkspaceSqlAadAdminsOperations
:ivar workspace_managed_identity_sql_control_settings: WorkspaceManagedIdentitySqlControlSettingsOperations operations
:vartype workspace_managed_identity_sql_control_settings: azure.mgmt.synapse.operations.WorkspaceManagedIdentitySqlControlSettingsOperations
:ivar restorable_dropped_sql_pools: RestorableDroppedSqlPoolsOperations operations
:vartype restorable_dropped_sql_pools: azure.mgmt.synapse.operations.RestorableDroppedSqlPoolsOperations
:ivar big_data_pools: BigDataPoolsOperations operations
:vartype big_data_pools: azure.mgmt.synapse.operations.BigDataPoolsOperations
:ivar library: LibraryOperations operations
:vartype library: azure.mgmt.synapse.operations.LibraryOperations
:ivar libraries: LibrariesOperations operations
:vartype libraries: azure.mgmt.synapse.operations.LibrariesOperations
:ivar integration_runtimes: IntegrationRuntimesOperations operations
:vartype integration_runtimes: azure.mgmt.synapse.operations.IntegrationRuntimesOperations
:ivar integration_runtime_node_ip_address: IntegrationRuntimeNodeIpAddressOperations operations
:vartype integration_runtime_node_ip_address: azure.mgmt.synapse.operations.IntegrationRuntimeNodeIpAddressOperations
:ivar integration_runtime_object_metadata: IntegrationRuntimeObjectMetadataOperations operations
:vartype integration_runtime_object_metadata: azure.mgmt.synapse.operations.IntegrationRuntimeObjectMetadataOperations
:ivar integration_runtime_nodes: IntegrationRuntimeNodesOperations operations
:vartype integration_runtime_nodes: azure.mgmt.synapse.operations.IntegrationRuntimeNodesOperations
:ivar integration_runtime_credentials: IntegrationRuntimeCredentialsOperations operations
:vartype integration_runtime_credentials: azure.mgmt.synapse.operations.IntegrationRuntimeCredentialsOperations
:ivar integration_runtime_connection_infos: IntegrationRuntimeConnectionInfosOperations operations
:vartype integration_runtime_connection_infos: azure.mgmt.synapse.operations.IntegrationRuntimeConnectionInfosOperations
:ivar integration_runtime_auth_keys: IntegrationRuntimeAuthKeysOperations operations
:vartype integration_runtime_auth_keys: azure.mgmt.synapse.operations.IntegrationRuntimeAuthKeysOperations
:ivar integration_runtime_monitoring_data: IntegrationRuntimeMonitoringDataOperations operations
:vartype integration_runtime_monitoring_data: azure.mgmt.synapse.operations.IntegrationRuntimeMonitoringDataOperations
:ivar integration_runtime_status: IntegrationRuntimeStatusOperations operations
:vartype integration_runtime_status: azure.mgmt.synapse.operations.IntegrationRuntimeStatusOperations
:ivar spark_configuration: SparkConfigurationOperations operations
:vartype spark_configuration: azure.mgmt.synapse.operations.SparkConfigurationOperations
:ivar spark_configurations: SparkConfigurationsOperations operations
:vartype spark_configurations: azure.mgmt.synapse.operations.SparkConfigurationsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = SynapseManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.azure_ad_only_authentications = AzureADOnlyAuthenticationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.ip_firewall_rules = IpFirewallRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.keys = KeysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_hub_private_link_resources = PrivateLinkHubPrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_hubs = PrivateLinkHubsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections_private_link_hub = PrivateEndpointConnectionsPrivateLinkHubOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pools = SqlPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_metadata_sync_configs = SqlPoolMetadataSyncConfigsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_operation_results = SqlPoolOperationResultsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_geo_backup_policies = SqlPoolGeoBackupPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_data_warehouse_user_activities = SqlPoolDataWarehouseUserActivitiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_restore_points = SqlPoolRestorePointsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_replication_links = SqlPoolReplicationLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_maintenance_windows = SqlPoolMaintenanceWindowsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_maintenance_window_options = SqlPoolMaintenanceWindowOptionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_transparent_data_encryptions = SqlPoolTransparentDataEncryptionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_blob_auditing_policies = SqlPoolBlobAuditingPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_operations = SqlPoolOperationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_usages = SqlPoolUsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_sensitivity_labels = SqlPoolSensitivityLabelsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_recommended_sensitivity_labels = SqlPoolRecommendedSensitivityLabelsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_schemas = SqlPoolSchemasOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_tables = SqlPoolTablesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_table_columns = SqlPoolTableColumnsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_connection_policies = SqlPoolConnectionPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_vulnerability_assessments = SqlPoolVulnerabilityAssessmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_vulnerability_assessment_scans = SqlPoolVulnerabilityAssessmentScansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_security_alert_policies = SqlPoolSecurityAlertPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_vulnerability_assessment_rule_baselines = SqlPoolVulnerabilityAssessmentRuleBaselinesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.extended_sql_pool_blob_auditing_policies = ExtendedSqlPoolBlobAuditingPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.data_masking_policies = DataMaskingPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.data_masking_rules = DataMaskingRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_columns = SqlPoolColumnsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_workload_group = SqlPoolWorkloadGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sql_pool_workload_classifier = SqlPoolWorkloadClassifierOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_managed_sql_server_blob_auditing_policies = WorkspaceManagedSqlServerBlobAuditingPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_managed_sql_server_extended_blob_auditing_policies = WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_managed_sql_server_security_alert_policy = WorkspaceManagedSqlServerSecurityAlertPolicyOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_managed_sql_server_vulnerability_assessments = WorkspaceManagedSqlServerVulnerabilityAssessmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_managed_sql_server_encryption_protector = WorkspaceManagedSqlServerEncryptionProtectorOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_managed_sql_server_usages = WorkspaceManagedSqlServerUsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_managed_sql_server_recoverable_sql_pools = WorkspaceManagedSqlServerRecoverableSqlPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspaces = WorkspacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_aad_admins = WorkspaceAadAdminsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_sql_aad_admins = WorkspaceSqlAadAdminsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workspace_managed_identity_sql_control_settings = WorkspaceManagedIdentitySqlControlSettingsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.restorable_dropped_sql_pools = RestorableDroppedSqlPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.big_data_pools = BigDataPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.library = LibraryOperations(
self._client, self._config, self._serialize, self._deserialize)
self.libraries = LibrariesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.integration_runtimes = IntegrationRuntimesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.integration_runtime_node_ip_address = IntegrationRuntimeNodeIpAddressOperations(
self._client, self._config, self._serialize, self._deserialize)
self.integration_runtime_object_metadata = IntegrationRuntimeObjectMetadataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.integration_runtime_nodes = IntegrationRuntimeNodesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.integration_runtime_credentials = IntegrationRuntimeCredentialsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.integration_runtime_connection_infos = IntegrationRuntimeConnectionInfosOperations(
self._client, self._config, self._serialize, self._deserialize)
self.integration_runtime_auth_keys = IntegrationRuntimeAuthKeysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.integration_runtime_monitoring_data = IntegrationRuntimeMonitoringDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.integration_runtime_status = IntegrationRuntimeStatusOperations(
self._client, self._config, self._serialize, self._deserialize)
self.spark_configuration = SparkConfigurationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.spark_configurations = SparkConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
    def close(self):
        # type: () -> None
        """Close the underlying ARM pipeline client and release its transport."""
        self._client.close()
    def __enter__(self):
        # type: () -> SynapseManagementClient
        """Enter the underlying pipeline client's context and return self."""
        self._client.__enter__()
        return self
    def __exit__(self, *exc_details):
        # type: (Any) -> None
        """Delegate context exit (transport cleanup) to the pipeline client."""
        self._client.__exit__(*exc_details)
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
0d8d655f64d764621a9762d4c449a17f82d6ac57 | b4d160ff9bc139752f04ead3c38b88cf2d91c8a2 | /Tests/DegenPrimer_Tests/Test_SecStructures.py | feda0edcb1189bb46d9320c9c0c9a697b3bbb902 | [] | no_license | allista/DegenPrimer | 2c69bf832f908601c28245c735db9b6b1efa9932 | c610551c9f6f769dcd03f945d7682471ea91bade | refs/heads/master | 2022-06-03T01:16:12.269221 | 2022-05-12T11:16:02 | 2022-05-12T11:16:02 | 45,181,326 | 7 | 4 | null | 2022-05-07T12:22:54 | 2015-10-29T12:20:21 | Python | UTF-8 | Python | false | false | 1,656 | py | # coding=utf-8
#
# Copyright (C) 2012 Allis Tauri <allista@gmail.com>
#
# degen_primer is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# degen_primer is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 2016-01-14
@author: Allis Tauri <allista@gmail.com>
'''
def test():
import cProfile
import DegenPrimer.TD_Functions as tdf
from DegenPrimer.SecStructures import Duplex, reverse_complement, Dimer
tdf.PCR_P.Na = 50.0e-3
tdf.PCR_P.Mg = 3.0e-3
tdf.PCR_P.dNTP = 0.15e-6
tdf.PCR_P.DNA = 1.0e-9
tdf.PCR_P.DMSO = 0.0
tdf.PCR_P.PCR_T = 60.0
with tdf.AcquireParameters():
du = Duplex('AGAGAACGCAAAGATCGGGAAC', 'CTTGCGTTTCTAACCCTTG'[::-1], dimer=Dimer((3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21), 3))
print du
cProfile.runctx('for x in xrange(100000): du.print_most_stable()',
globals(), locals(), 'Duplex.print_stable.profile')
# seq = 'ATGCGTCACTACCAGT'*10000
# cProfile.runctx('''for x in xrange(100):
# reverse_complement(seq)''',
# globals(), locals(), 'reverse_complement.profile')
test() | [
"allista@gmail.com"
] | allista@gmail.com |
c2de7d559633cefc527fd6e213dd4284f75e0499 | 94bb879816dbdd69559ecfcc70a09f33d104af67 | /source/functions/sqlmap/plugins/dbms/oracle/fingerprint.py | 3e471ca628ae7c75cc2d193a498e8d77b0777536 | [
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"GPL-CC-1.0",
"LicenseRef-scancode-unknown",
"MIT"
] | permissive | 51000000/CampusCyberInspectionTool2021 | f328ad571ab88051aa6928a67209dd94ce25eb6c | 27a2de7ff3707ba6ab084acfce79a7d3f42b8f84 | refs/heads/main | 2023-03-28T01:11:22.678066 | 2021-04-01T05:23:54 | 2021-04-01T05:23:54 | 353,502,239 | 0 | 0 | MIT | 2021-03-31T22:06:49 | 2021-03-31T22:06:48 | null | UTF-8 | Python | false | false | 3,859 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2021 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import re
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.session import setDbms
from lib.core.settings import ORACLE_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
    """Active and banner-based fingerprinting for Oracle back-ends."""

    def __init__(self):
        GenericFingerprint.__init__(self, DBMS.ORACLE)

    def getFingerprint(self):
        """Return a human-readable fingerprint summary.

        Combines the web-server OS, the DBMS OS parsed from the banner
        and -- in extensive mode -- the active, banner-parsing and HTML
        error-message fingerprints.
        """
        value = ""
        wsOsFp = Format.getOs("web server", kb.headersFp)

        if wsOsFp:
            value += "%s\n" % wsOsFp

        if kb.data.banner:
            dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)

            if dbmsOsFp:
                value += "%s\n" % dbmsOsFp

        value += "back-end DBMS: "

        if not conf.extensiveFp:
            # Quick mode: only name the DBMS, no version probing output.
            value += DBMS.ORACLE
            return value

        actVer = Format.getDbms()
        blank = " " * 15  # aligns continuation lines under the header
        value += "active fingerprint: %s" % actVer

        if kb.bannerFp:
            banVer = kb.bannerFp.get("dbmsVersion")

            if banVer:
                banVer = Format.getDbms([banVer])
                value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)

        htmlErrorFp = Format.getErrorParsedDBMSes()

        if htmlErrorFp:
            value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)

        return value

    def checkDbms(self):
        """Detect whether the back-end DBMS is Oracle.

        Returns True (after recording the DBMS and, in extensive mode, its
        major version) or False.  Detection is boolean-based injection; for
        direct connections the probes are assumed true.
        """
        if not conf.extensiveFp and Backend.isDbmsWithin(ORACLE_ALIASES):
            # User already forced/identified Oracle; just grab the banner.
            setDbms(DBMS.ORACLE)

            self.getBanner()

            return True

        infoMsg = "testing %s" % DBMS.ORACLE
        logger.info(infoMsg)

        # NOTE: SELECT LENGTH(SYSDATE)=LENGTH(SYSDATE) FROM DUAL does
        # not work connecting directly to the Oracle database
        if conf.direct:
            result = True
        else:
            result = inject.checkBooleanExpression("LENGTH(SYSDATE)=LENGTH(SYSDATE)")

        if result:
            infoMsg = "confirming %s" % DBMS.ORACLE
            logger.info(infoMsg)

            # NOTE: SELECT NVL(RAWTOHEX([RANDNUM1]),[RANDNUM1])=RAWTOHEX([RANDNUM1]) FROM DUAL does
            # not work connecting directly to the Oracle database
            if conf.direct:
                result = True
            else:
                # Second, Oracle-specific probe (RAWTOHEX/NVL) to rule out
                # false positives from the generic first probe.
                result = inject.checkBooleanExpression("NVL(RAWTOHEX([RANDNUM1]),[RANDNUM1])=RAWTOHEX([RANDNUM1])")

            if not result:
                warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
                logger.warn(warnMsg)

                return False

            setDbms(DBMS.ORACLE)

            self.getBanner()

            if not conf.extensiveFp:
                return True

            infoMsg = "actively fingerprinting %s" % DBMS.ORACLE
            logger.info(infoMsg)

            # Reference: https://en.wikipedia.org/wiki/Oracle_Database
            # Probe newest-first; the numeric prefix of VERSION is one digit
            # before 10g and two digits from 10g onwards.
            for version in ("19c", "18c", "12c", "11g", "10g", "9i", "8i", "7"):
                number = int(re.search(r"([\d]+)", version).group(1))
                output = inject.checkBooleanExpression("%d=(SELECT SUBSTR((VERSION),1,%d) FROM SYS.PRODUCT_COMPONENT_VERSION WHERE ROWNUM=1)" % (number, 1 if number < 10 else 2))

                if output:
                    Backend.setVersion(version)
                    break

            return True
        else:
            warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
            logger.warn(warnMsg)

            return False

    def forceDbmsEnum(self):
        """Upper-case user-supplied db/table names: Oracle stores
        unquoted identifiers in upper case."""
        if conf.db:
            conf.db = conf.db.upper()

        if conf.tbl:
            conf.tbl = conf.tbl.upper()
"55148245+51000000@users.noreply.github.com"
] | 55148245+51000000@users.noreply.github.com |
7c363abe67e32e125b760f67b97c0168b77ec74a | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /ABC/abc201-abc250/abc248/c/main.py | f8dc5e6782ba90a0e4f2cd025916315e4147dbc5 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 608 | py | # -*- coding: utf-8 -*-
def main():
    """Count length-n sequences with elements in 1..m and sum <= k,
    modulo 998244353 (AtCoder ABC248 C)."""
    import sys
    input = sys.stdin.readline

    n, m, k = map(int, input().split())

    size = n * m + 10
    mod = 998244353

    # dp[j] = number of prefixes whose element sum equals j + 1
    # (indices are shifted down by one relative to the actual sum).
    dp = [0 for _ in range(size)]

    # First element: value x (1..m) lives at index x - 1.  Seed only
    # values that already satisfy the sum bound; the original seeded all
    # m entries, which over-counted when n == 1 and m > k.
    for i in range(min(m, k)):
        dp[i] = 1

    for i in range(n - 1):
        ndp = [0 for _ in range(size)]
        for j in range(k + 1):
            for x in range(1, m + 1):
                # New shifted index j + x corresponds to sum j + x + 1;
                # skip transitions whose sum would exceed k.
                if j + x >= k:
                    continue
                ndp[j + x] += dp[j]
                ndp[j + x] %= mod
        dp = ndp

    print(sum(dp) % mod)


if __name__ == "__main__":
    main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
5ef51b0a8c32b67780cc1894aa59fcc513e7b686 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_3_1_neat/16_3_1_bsoist_a.py | 6d05be7fb34134bb8fb5c0df300c6d30f8e63afe | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,334 | py | from string import uppercase
import sys
import itertools
def no_majority(nums):
    """Return True when no single party holds a strict majority.

    *nums* are per-party senator counts; an empty chamber (total 0) has
    no majority by definition.  Uses exact integer arithmetic
    (2 * num > total  <=>  num / total > 0.5) instead of the original
    floating-point division, avoiding any rounding concerns.
    """
    total = sum(nums)
    return all(2 * num <= total for num in nums)
def get_indexes(indexes):
    """Yield every ordered pair of distinct indexes, then each index alone."""
    for ordered_pair in itertools.permutations(indexes, r=2):
        yield ordered_pair
    for single_index in indexes:
        yield single_index
def get_step(parties):
    """Choose up to two party indexes whose senators leave this round.

    Tries every ordered pair of parties that still have senators and
    returns the first pair (as a two-element list) whose removal leaves
    no party with a strict majority.  Failing that, falls back to
    removing one senator from the largest party: (None, argmax).
    """
    active = [idx for idx, count in enumerate(parties) if count]
    for first, second in itertools.permutations(active, r=2):
        chosen = [None, None]
        remaining = parties[:]
        if remaining[first]:
            chosen[0] = first
            remaining[first] -= 1
        if remaining[second]:
            chosen[1] = second
            remaining[second] -= 1
        if no_majority(remaining):
            return chosen
    return None, parties.index(max(parties))
# Google Code Jam driver (Python 2): first stdin line is the case count;
# each case is a party-count line (unused -- implied by the counts line)
# followed by the per-party senator counts.  Senators are evacuated one
# or two at a time until the chamber is empty, printing the party letter
# of each evacuee per step.
for case_num in xrange(1, int(raw_input()) + 1):
    raw_input()  # number of parties -- redundant, skipped
    in_parties = map(int, raw_input().split(" "))
    plan = []
    while sum(in_parties) > 0:
        a, b = get_step(in_parties)
        # Map chosen indexes to party letters; `a` is None when only a
        # single senator can safely leave this round.
        plan.append("".join([uppercase[n] for n in (a, b) if n is not None]))
        if a is not None:
            in_parties[a] -= 1
        if b is not None:
            in_parties[b] -= 1
    print "Case #%s: %s" % (case_num, " ".join(plan))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
c2313614e908cd332d7449209ae7c9002a1fad36 | 4e39dbcd39c746dc661478d601d5e9ae0893b084 | /TensorFlow2/Segmentation/MaskRCNN/mask_rcnn/distributed_executer.py | 39307cc3ae086fef94b31cc82efb197fe802b345 | [
"Apache-2.0"
] | permissive | gpauloski/DeepLearningExamples | 2ff368cf0414ad8451a85465f023a94d1a5753f9 | 81178d2aa6e6eaa88c40727276601b52739ba408 | refs/heads/master | 2023-02-03T13:33:41.822429 | 2020-12-14T16:52:31 | 2020-12-14T16:52:31 | 254,721,527 | 2 | 0 | null | 2020-04-10T19:42:36 | 2020-04-10T19:42:35 | null | UTF-8 | Python | false | false | 20,775 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to run mask rcnn model in different distributed strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import six
import math
import multiprocessing
import tensorflow as tf
from mask_rcnn.utils.logging_formatter import logging
from mask_rcnn.utils.distributed_utils import MPI_is_distributed
from mask_rcnn.utils.distributed_utils import MPI_local_rank
from mask_rcnn.utils.distributed_utils import MPI_rank
from mask_rcnn.hooks.logging_hook import AutoLoggingHook
from mask_rcnn.utils.lazy_imports import LazyImport
hvd = LazyImport("horovod.tensorflow")
from tensorflow.core.protobuf import rewriter_config_pb2
from mask_rcnn import evaluation
from mask_rcnn.hyperparameters import params_io
from mask_rcnn.hooks import CheckpointSaverHook
from mask_rcnn.hooks import PretrainedWeightsLoadingHook
def get_training_hooks(mode, model_dir, checkpoint_path=None, skip_checkpoint_variables=None):
    """Assemble the session hooks used while running the estimator.

    :param mode: one of 'train' or 'eval'.
    :param model_dir: directory where the chief rank writes checkpoints.
    :param checkpoint_path: optional pretrained backbone checkpoint to load.
    :param skip_checkpoint_variables: regex of variables skipped on restore.
    :return: list of session hooks, in registration order.
    """
    assert mode in ('train', 'eval')

    is_chief = not MPI_is_distributed() or MPI_rank() == 0

    hooks = [
        AutoLoggingHook(
            # NGC jobs log less often to keep stdout manageable.
            log_every_n_steps=5 if "NGC_JOB_ID" not in os.environ else 100,
            warmup_steps=100,
            is_training=True
        )
    ]

    if is_chief:
        hooks.append(PretrainedWeightsLoadingHook(
            prefix="resnet50/",
            checkpoint_path=checkpoint_path,
            skip_variables_regex=skip_checkpoint_variables
        ))

    if MPI_is_distributed() and mode == "train":
        hooks.append(hvd.BroadcastGlobalVariablesHook(root_rank=0))

    if is_chief:
        hooks.append(CheckpointSaverHook(
            checkpoint_dir=model_dir,
            checkpoint_basename="model.ckpt"
        ))

    return hooks
@six.add_metaclass(abc.ABCMeta)
class BaseExecuter(object):
"""Interface to run Mask RCNN model in TPUs/GPUs.
Arguments:
flags: FLAGS object passed from the user.
model_config: Model configuration needed to run distribution strategy.
model_fn: Model function to be passed to Estimator.
"""
def __init__(self, runtime_config, model_fn):
self._runtime_config = runtime_config
self._model_fn = model_fn
os.environ['CUDA_CACHE_DISABLE'] = '0'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
@staticmethod
def _get_session_config(mode, use_xla, use_amp, use_tf_distributed=False, allow_xla_at_inference=False):
assert mode in ('train', 'eval')
rewrite_options = rewriter_config_pb2.RewriterConfig(
# arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
# arithmetic_optimization=rewriter_config_pb2.RewriterConfig.ON,
# constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
# constant_folding=rewriter_config_pb2.RewriterConfig.ON, # TO TEST
# debug_stripper=rewriter_config_pb2.RewriterConfig.OFF,
# debug_stripper=rewriter_config_pb2.RewriterConfig.ON, # TO TEST
# dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
# dependency_optimization=rewriter_config_pb2.RewriterConfig.ON, # TO TEST
# disable_model_pruning=False, # INCOMPATIBLE with AMP
# function_optimization=True,
# implementation_selector=True,
# loop_optimization=rewriter_config_pb2.RewriterConfig.OFF,
# loop_optimization=rewriter_config_pb2.RewriterConfig.ON, # TO TEST
# The default setting (SCHEDULING and SWAPPING HEURISTICS only)
# memory_optimization=rewriter_config_pb2.RewriterConfig.DEFAULT_MEM_OPT,
# Disabled in the meta-optimizer.
# memory_optimization=rewriter_config_pb2.RewriterConfig.NO_MEM_OPT,
# Driven by manual op-level annotations.
# memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL,
# Swapping heuristic will move a tensor from the GPU to the CPU and move it
# back when needed to reduce peak memory usage..
# memory_optimization=rewriter_config_pb2.RewriterConfig.SWAPPING_HEURISTICS,
# Recomputation heuristics will recompute ops (such as Relu activation)
# during backprop instead of storing them, reducing peak memory usage.
# memory_optimization=rewriter_config_pb2.RewriterConfig.RECOMPUTATION_HEURISTICS,
# Scheduling will split big ops such as AddN and try to enforce a schedule of
# the new computations that decreases peak memory usage.
# memory_optimization=rewriter_config_pb2.RewriterConfig.SCHEDULING_HEURISTICS,
# Use any combination of swapping and recomputation heuristics.
# memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS,
meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.TWO,
# meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE,
# meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.DEFAULT_NUM_ITERS,
# pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF,
# pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.ON, # TO TEST
#
# remapping=rewriter_config_pb2.RewriterConfig.OFF,
# remapping=rewriter_config_pb2.RewriterConfig.ON, # TO TEST
# scoped_allocator_optimization=rewriter_config_pb2.RewriterConfig.OFF,
# scoped_allocator_optimization=rewriter_config_pb2.RewriterConfig.ON, # TO TEST
# shape_optimization=rewriter_config_pb2.RewriterConfig.OFF,
# shape_optimization=rewriter_config_pb2.RewriterConfig.ON, # TO TEST
)
if use_amp:
logging.info("[%s] AMP is activated - Experiment Feature" % mode)
rewrite_options.auto_mixed_precision = True
config = tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
graph_options=tf.compat.v1.GraphOptions(
rewrite_options=rewrite_options,
# infer_shapes=True # Heavily drops throughput by 30%
)
)
if use_tf_distributed:
config.gpu_options.force_gpu_compatible = False
else:
config.gpu_options.force_gpu_compatible = True # Force pinned memory
if MPI_is_distributed():
config.gpu_options.visible_device_list = str(MPI_local_rank())
if use_xla and (mode == "train" or allow_xla_at_inference):
logging.info("[%s] XLA is activated - Experiment Feature" % mode)
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
# config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_2
if mode == 'train':
config.intra_op_parallelism_threads = 1 # Avoid pool of Eigen threads
if MPI_is_distributed():
config.inter_op_parallelism_threads = max(2, multiprocessing.cpu_count() // hvd.local_size())
elif not use_tf_distributed:
config.inter_op_parallelism_threads = 4
return config
@abc.abstractmethod
def build_strategy_configuration(self, mode):
"""Builds run configuration for distributed train/eval.
Returns:
RunConfig with distribution strategy configurations
to pass to the constructor of TPUEstimator/Estimator.
"""
NotImplementedError('Must be implemented in subclass')
def build_model_parameters(self, mode):
"""Builds model parameter."""
assert mode in ('train', 'eval')
batch_size = self._runtime_config.train_batch_size if mode == 'train' else self._runtime_config.eval_batch_size
params = dict(
self._runtime_config.values(),
mode=mode,
batch_size=batch_size,
model_dir=self._runtime_config.model_dir,
)
if mode == 'eval':
params = dict(
params,
augment_input_data=False,
)
return params
def build_mask_rcnn_estimator(self, params, run_config, mode):
"""Creates TPUEstimator/Estimator instance.
Arguments:
params: A dictionary to pass to Estimator `model_fn`.
run_config: RunConfig instance specifying distribution strategy
configurations.
mode: Mode -- one of 'train` or `eval`.
Returns:
TFEstimator or TPUEstimator instance.
"""
assert mode in ('train', 'eval')
return tf.estimator.Estimator(
model_fn=self._model_fn,
model_dir=self._runtime_config.model_dir,
config=run_config,
params=params
)
def _save_config(self):
"""Save parameters to config files if model_dir is defined."""
model_dir = self._runtime_config.model_dir
if model_dir is not None:
if not tf.io.gfile.exists(model_dir):
tf.io.gfile.makedirs(model_dir)
params_io.save_hparams_to_yaml(self._runtime_config, model_dir + '/params.yaml')
def _write_summary(self, summary_dir, eval_results, predictions, current_step):
if not self._runtime_config.visualize_images_summary:
predictions = None
evaluation.write_summary(eval_results, summary_dir, current_step, predictions=predictions)
    def train(self, train_input_fn, run_eval_after_train=False, eval_input_fn=None):
        """Run distributed training on Mask RCNN model.

        :param train_input_fn: input_fn feeding the training Estimator.
        :param run_eval_after_train: when True, evaluate the final
            checkpoint and return its metrics.
        :param eval_input_fn: input_fn for that evaluation pass; required
            when *run_eval_after_train* is True.
        :return: eval metrics dict, or None when no eval was requested.
        """
        # Persist hyperparameters up front so the run is reproducible
        # even if training crashes.
        self._save_config()
        train_run_config = self.build_strategy_configuration('train')
        train_params = self.build_model_parameters('train')
        train_estimator = self.build_mask_rcnn_estimator(train_params, train_run_config, 'train')
        train_estimator.train(
            input_fn=train_input_fn,
            max_steps=self._runtime_config.total_steps,
            hooks=get_training_hooks(
                mode="train",
                model_dir=self._runtime_config.model_dir,
                checkpoint_path=self._runtime_config.checkpoint,
                skip_checkpoint_variables=self._runtime_config.skip_checkpoint_variables
            )
        )
        if not run_eval_after_train:
            return None
        if eval_input_fn is None:
            raise ValueError('Eval input_fn must be passed to conduct evaluation after training.')
        # Evaluation uses its own strategy/params: batch size and input
        # augmentation differ from training.
        eval_run_config = self.build_strategy_configuration('eval')
        eval_params = self.build_model_parameters('eval')
        eval_estimator = self.build_mask_rcnn_estimator(eval_params, eval_run_config, 'eval')
        last_ckpt = tf.train.latest_checkpoint(self._runtime_config.model_dir, latest_filename=None)
        logging.info("Restoring parameters from %s\n" % last_ckpt)
        eval_results, predictions = evaluation.evaluate(
            eval_estimator,
            eval_input_fn,
            self._runtime_config.eval_samples,
            self._runtime_config.eval_batch_size,
            self._runtime_config.include_mask,
            self._runtime_config.val_json_file,
            report_frequency=self._runtime_config.report_frequency
        )
        output_dir = os.path.join(self._runtime_config.model_dir, 'eval')
        tf.io.gfile.makedirs(output_dir)
        # Summary writer writes out eval metrics.
        self._write_summary(output_dir, eval_results, predictions, self._runtime_config.total_steps)
        return eval_results
    def train_and_eval(self, train_input_fn, eval_input_fn):
        """Run distributed train and eval on Mask RCNN model.

        Alternates `num_steps_per_eval` training steps with a full
        evaluation pass (on the chief rank only) until `total_steps`,
        returning the metrics of the last eval cycle.
        """
        self._save_config()
        output_dir = os.path.join(self._runtime_config.model_dir, 'eval')
        tf.io.gfile.makedirs(output_dir)
        train_run_config = self.build_strategy_configuration('train')
        train_params = self.build_model_parameters('train')
        train_estimator = self.build_mask_rcnn_estimator(train_params, train_run_config, 'train')
        # The eval estimator is built lazily on the first eval cycle.
        eval_estimator = None
        eval_results = None
        num_cycles = math.ceil(self._runtime_config.total_steps / self._runtime_config.num_steps_per_eval)
        # Hooks are reused across cycles so their internal state persists.
        training_hooks = get_training_hooks(
            mode="train",
            model_dir=self._runtime_config.model_dir,
            checkpoint_path=self._runtime_config.checkpoint,
            skip_checkpoint_variables=self._runtime_config.skip_checkpoint_variables
        )
        for cycle in range(1, num_cycles + 1):
            if not MPI_is_distributed() or MPI_rank() == 0:
                print()  # Visual Spacing
                logging.info("=================================")
                logging.info(' Start training cycle %02d' % cycle)
                logging.info("=================================\n")
            max_cycle_step = min(int(cycle * self._runtime_config.num_steps_per_eval), self._runtime_config.total_steps)
            # Hard-coded developer switch: tfprof profiling of the train
            # loop on the chief rank only.
            PROFILER_ENABLED = False
            if (not MPI_is_distributed() or MPI_rank() == 0) and PROFILER_ENABLED:
                profiler_context_manager = tf.contrib.tfprof.ProfileContext
            else:
                from contextlib import suppress
                # Same call signature as ProfileContext, but does nothing.
                profiler_context_manager = lambda *args, **kwargs: suppress()  # No-Op context manager
            with profiler_context_manager(
                '/workspace/profiling/',
                trace_steps=range(100, 200, 3),
                dump_steps=[200]
            ) as pctx:
                if (not MPI_is_distributed() or MPI_rank() == 0) and PROFILER_ENABLED:
                    opts = tf.compat.v1.profiler.ProfileOptionBuilder.time_and_memory()
                    pctx.add_auto_profiling('op', opts, [150, 200])
                train_estimator.train(
                    input_fn=train_input_fn,
                    max_steps=max_cycle_step,
                    hooks=training_hooks,
                )
            if not MPI_is_distributed() or MPI_rank() == 0:
                print()  # Visual Spacing
                logging.info("=================================")
                logging.info(' Start evaluation cycle %02d' % cycle)
                logging.info("=================================\n")
                if eval_estimator is None:
                    eval_run_config = self.build_strategy_configuration('eval')
                    eval_params = self.build_model_parameters('eval')
                    eval_estimator = self.build_mask_rcnn_estimator(eval_params, eval_run_config, 'eval')
                last_ckpt = tf.train.latest_checkpoint(self._runtime_config.model_dir, latest_filename=None)
                logging.info("Restoring parameters from %s\n" % last_ckpt)
                eval_results, predictions = evaluation.evaluate(
                    eval_estimator,
                    eval_input_fn,
                    self._runtime_config.eval_samples,
                    self._runtime_config.eval_batch_size,
                    self._runtime_config.include_mask,
                    self._runtime_config.val_json_file,
                    report_frequency=self._runtime_config.report_frequency
                )
                self._write_summary(output_dir, eval_results, predictions, max_cycle_step)
            if MPI_is_distributed():
                # Non-chief ranks skip eval; everyone syncs here before the
                # next training cycle starts.
                from mpi4py import MPI
                MPI.COMM_WORLD.Barrier()  # Waiting for all MPI processes to sync
        return eval_results
def eval(self, eval_input_fn):
    """Run distributed eval on Mask RCNN model.

    Evaluates the latest checkpoint found in `model_dir` (or a freshly
    initialized model if none exists) and writes the metrics summary.

    :param eval_input_fn: input_fn producing evaluation batches.
    :return: metrics dict returned by evaluation.evaluate.
    """
    output_dir = os.path.join(self._runtime_config.model_dir, 'eval')
    tf.io.gfile.makedirs(output_dir)
    # Summary writer writes out eval metrics.
    run_config = self.build_strategy_configuration('eval')
    eval_params = self.build_model_parameters('eval')
    eval_estimator = self.build_mask_rcnn_estimator(eval_params, run_config, 'eval')
    logging.info('Starting to evaluate.')
    last_ckpt = tf.train.latest_checkpoint(self._runtime_config.model_dir, latest_filename=None)
    if last_ckpt is not None:
        logging.info("Restoring parameters from %s\n" % last_ckpt)
        # Checkpoints are named <prefix>-<global_step>; recover the step count.
        current_step = int(os.path.basename(last_ckpt).split('-')[1])
    else:
        logging.warning(
            "Could not find trained model in model_dir: `%s`, running initialization to predict\n" %
            self._runtime_config.model_dir
        )
        current_step = 0
    eval_results, predictions = evaluation.evaluate(
        eval_estimator,
        eval_input_fn,
        self._runtime_config.eval_samples,
        self._runtime_config.eval_batch_size,
        self._runtime_config.include_mask,
        self._runtime_config.val_json_file
    )
    self._write_summary(output_dir, eval_results, predictions, current_step)
    if current_step >= self._runtime_config.total_steps:
        logging.info('Evaluation finished after training step %d' % current_step)
    return eval_results
class EstimatorExecuter(BaseExecuter):
    """Interface that runs Mask RCNN model using TPUEstimator."""

    def __init__(self, runtime_config, model_fn):
        # Runtime config and model_fn are stored by the base class.
        super(EstimatorExecuter, self).__init__(runtime_config, model_fn)
        if MPI_is_distributed():
            # Horovod/NCCL knobs must be exported before hvd.init().
            os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
            os.environ['HOROVOD_NUM_NCCL_STREAMS'] = '1'
            # os.environ['HOROVOD_AUTOTUNE'] = '2'
            hvd.init()
            logging.info("Horovod successfully initialized ...")
        # Private GPU thread mode: dedicated threads for GPU op dispatch.
        os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
        os.environ['TF_GPU_THREAD_COUNT'] = '1' if not MPI_is_distributed() else str(hvd.size())
        os.environ['TF_SYNC_ON_FINISH'] = '0'

    def build_strategy_configuration(self, mode):
        """Retrieves model configuration for running TF Estimator."""
        run_config = tf.estimator.RunConfig(
            # Offset the seed per MPI rank so workers don't shuffle identically.
            tf_random_seed=(
                self._runtime_config.seed
                if not MPI_is_distributed() or self._runtime_config.seed is None else
                self._runtime_config.seed + MPI_rank()
            ),
            model_dir=self._runtime_config.model_dir,
            save_summary_steps=None,  # disabled
            save_checkpoints_steps=None,  # disabled
            save_checkpoints_secs=None,  # disabled
            keep_checkpoint_max=20,  # disabled
            keep_checkpoint_every_n_hours=None,  # disabled
            log_step_count_steps=None,  # disabled
            session_config=self._get_session_config(
                mode=mode,
                use_xla=self._runtime_config.use_xla,
                use_amp=self._runtime_config.use_amp,
                use_tf_distributed=False,
                allow_xla_at_inference=self._runtime_config.allow_xla_at_inference  # TODO: Remove when XLA at inference fixed
            ),
            protocol=None,
            device_fn=None,
            train_distribute=None,
            eval_distribute=None,
            experimental_distribute=None
        )
        return run_config
class TFDistributedExecuter(BaseExecuter):
    """Interface that runs Mask RCNN model using MultiWorkerMirroredStrategy."""

    @staticmethod
    def is_eval_task():
        # A TF_CONFIG task_type of 'evaluator' marks the dedicated evaluation worker.
        return tf.distribute.cluster_resolver.TFConfigClusterResolver().task_type == 'evaluator'

    def build_strategy_configuration(self, mode):
        """Retrieves model configuration for MultiWorkerMirroredStrategy."""
        # Single-node multi-GPU mirroring; the multi-worker NCCL variant is kept
        # below, commented out, as the original alternative.
        distributed_strategy = tf.distribute.MirroredStrategy()
        # distributed_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
        #     tf.distribute.experimental.CollectiveCommunication.NCCL
        # )
        run_config = tf.estimator.RunConfig(
            tf_random_seed=self._runtime_config.seed,
            model_dir=self._runtime_config.model_dir,
            save_summary_steps=None,  # disabled
            save_checkpoints_steps=None,  # disabled
            save_checkpoints_secs=None,  # disabled
            keep_checkpoint_max=20,  # disabled
            keep_checkpoint_every_n_hours=None,  # disabled
            log_step_count_steps=None,  # disabled
            session_config=self._get_session_config(
                mode=mode,
                use_xla=self._runtime_config.use_xla,
                use_amp=self._runtime_config.use_amp,
                use_tf_distributed=True,
                # TODO: Remove when XLA at inference fixed
                allow_xla_at_inference=self._runtime_config.allow_xla_at_inference
            ),
            protocol=None,
            device_fn=None,
            # The strategy only distributes training; eval runs un-distributed.
            train_distribute=distributed_strategy if mode == "train" else None,
            eval_distribute=None,
            experimental_distribute=None
        )
        return run_config
| [
"41076710+nvpstr@users.noreply.github.com"
] | 41076710+nvpstr@users.noreply.github.com |
1818b2f90162f7c979b4d61341cd65944efcddbd | 7cd6a7bc72f0026056a7238c0feea081bfff13a7 | /bioprocs/chipseq.py | 17d1dc9bfa97f4b60ece6b03c19b086b816f8d32 | [
"MIT"
] | permissive | shijianasdf/biopipen | 8d963ccca38e2a9d7a46582a5eec45c38924655c | d53b78aa192fd56a5da457463b099b2aa833b284 | refs/heads/master | 2023-08-18T18:28:03.306877 | 2019-12-31T16:17:35 | 2019-12-31T16:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | """ChIP-seq data analysis"""
from pyppl import Proc
from diot import Diot
from . import params, proc_factory
# Process definition: convert ChIP-seq peaks into a per-gene regulatory
# potential score (see the annotate block below for the formula and formats).
pPeakToRegPotential = proc_factory(
    desc = 'Convert peaks to regulatory potential score for each gene.',
    config = Diot(annotate = """
    @name:
        pPeakToRegPotential
    @description:
        Convert peaks to regulatory potential score for each gene
        The formula is:
        ```
                       -(0.5 + 4*di/d0)
        PC = sum (pi * e                )
        ```
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4489297/
    @input:
        `peakfile:file`: The BED/peak file for peaks
        `genefile:file`: The BED file for gene coordinates
    @output:
        `outfile:file`: The regulatory potential file for each gene
    @args:
        `signal`: `pi` in the formula. Boolean value, whether use the peak intensity signale or not, default: `True`,
        `genefmt`: The format for `genefile`, default: `ucsc+gz`. It could be:
            - ucsc or ucsc+gz: typically, you can download from http://hgdownload.cse.ucsc.edu/goldenPath/hg38/database/refGene.txt.gz
            - bed or bed+gz: [format](https://genome.ucsc.edu/FAQ/FAQformat#format1), 4th column required as gene identity.
        `peakfmt`: The format for `peakfile`, default: `peak`. It could be:
            - peak or peak+gz: (either [narrowPeak](https://genome.ucsc.edu/FAQ/FAQformat.html#format12) or [broadPeak](https://genome.ucsc.edu/FAQ/FAQformat.html#format13), the 7th column will be used as intensity
            - bed or bed+gz: [format](https://genome.ucsc.edu/FAQ/FAQformat#format1), 5th column will be used as intensity.
        `window`: `2 * d0` in the formula. The window where the peaks fall in will be consided, default: `100000`.
            ```
            |--------- window ----------|
            |---- d0 -----|
            |--- 50K --- TSS --- 50K ---|
                    ^ (peak center)
                    |-- di --|
            ```
    """))
pPeakToRegPotential.input = "peakfile:file, genefile:file"
pPeakToRegPotential.output = "outfile:file:{{peakfile | fn}}.rp.txt"
pPeakToRegPotential.args.signal = True
# BUGFIX: these two assignments previously ended with a trailing comma, which
# made the values 1-tuples (('ucsc+gz',) / ('peak',)) instead of plain strings.
pPeakToRegPotential.args.genefmt = 'ucsc+gz'
pPeakToRegPotential.args.peakfmt = 'peak'
pPeakToRegPotential.args.window = 100000
pPeakToRegPotential.lang = params.python.value
| [
"pwwang@pwwang.com"
] | pwwang@pwwang.com |
fbe02062236e4bf25f50e0a33ec6a97037677819 | 9431bba2d148f8aef9c0a8f3ca16fcf875890757 | /subprocessExer/sleep.py | de3ab7f2f7409196e54a69d6e37f13343f7cbe82 | [
"MIT"
] | permissive | terasakisatoshi/pythonCodes | fba0b78414b2c85f4a738200354ea583f0516768 | 953210c06e9885a7c885bc01047715a77de08a1a | refs/heads/master | 2023-05-14T12:30:22.201711 | 2023-05-07T13:41:22 | 2023-05-07T13:41:22 | 197,893,702 | 2 | 1 | MIT | 2022-11-25T10:59:52 | 2019-07-20T07:09:12 | Jupyter Notebook | UTF-8 | Python | false | false | 30 | py | import time
# Block for 10 seconds — presumably used as a long-running child process for
# the subprocess exercises in this folder (TODO confirm against the caller).
time.sleep(10)
| [
"terasakisatoshi.math@gmail.com"
] | terasakisatoshi.math@gmail.com |
83baef5baf27610966bd935ce8ca757d41264c46 | 8b441f592a6deb9b0a515cbd92bb4663ad79ffe4 | /churn_nrt/src/utils/date_functions.py | 5e913922995f1eedd9764317fc84f84cfc6caa46 | [] | no_license | carnaum2/use-cases | 0d391a6a10bb70b60a4025152a278b0e4c595d01 | 24920e3828234da691ab643b6dd9a0aa0a5c0df5 | refs/heads/master | 2022-12-07T03:41:34.299274 | 2020-09-07T10:20:32 | 2020-09-07T10:20:32 | 293,249,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,491 | py |
import datetime as dt
from dateutil.relativedelta import relativedelta
import pandas as pd
import sys
from pyspark.sql.functions import year
def get_next_month(yearmonth):
    """
    Return the yyyymm string for the month that follows *yearmonth* (yyyymm).
    """
    parsed = dt.datetime.strptime(yearmonth, "%Y%m")
    # Day 28 plus 4 days always overflows into the following month (28 + 4 = 32).
    in_next_month = dt.datetime(parsed.year, parsed.month, 28) + dt.timedelta(days=4)
    return in_next_month.strftime("%Y%m")
def get_closure(yearmonth_str):
    """
    Return the closure date (last day of the month) for a yyyymm string, as yyyymmdd.
    """
    # First day of the following month minus one day == last day of this month.
    first_of_next = dt.datetime.strptime(get_next_month(yearmonth_str), "%Y%m")
    return (first_of_next - dt.timedelta(days=1)).strftime("%Y%m%d")
def get_last_day_of_month(mydate):
    """
    Given a string date (format YYYY-MM-DD or YYYYMMDD) or a datetime object,
    return the last day of that month, in the same type as the input.
    Eg. mydate=2018-03-01 --> returns 2018-03-31
    """
    # `unicode` is a py2 leftover: string inputs short-circuit on the str check.
    as_string = isinstance(mydate, str) or isinstance(mydate, unicode)
    if as_string:
        fmt = "%Y-%m-%d" if "-" in mydate else "%Y%m%d"
        date_obj = dt.datetime.strptime(mydate, fmt)
    else:
        fmt = None
        date_obj = mydate
    # First day of the next month, minus one day.
    shifted = date_obj + relativedelta(months=1)
    last_day = dt.datetime(shifted.year, shifted.month, 1) - dt.timedelta(days=1)
    return dt.datetime.strftime(last_day, fmt) if as_string else last_day
def months_range(start_yearmonth, end_yearmonth):
    """
    Inclusive list of yyyymm strings, e.g. "201803".."201806" ->
    ['201803', '201804', '201805', '201806'].
    """
    start = dt.datetime.strptime(start_yearmonth, "%Y%m")
    end = dt.datetime.strptime(end_yearmonth, "%Y%m")
    # "MS" = month-start frequency: one timestamp on the 1st of every month.
    return [ts.strftime("%Y%m") for ts in pd.date_range(start, end, freq="MS")]
def days_range(start_yyyymmdd, end_yyyymmdd):
    """
    Inclusive list of yyyymmdd strings, e.g. "20190129".."20190204" ->
    ['20190129', '20190130', '20190131', '20190201', '20190202', '20190203', '20190204'].
    """
    start = dt.datetime.strptime(start_yyyymmdd, "%Y%m%d")
    end = dt.datetime.strptime(end_yyyymmdd, "%Y%m%d")
    return [day.strftime("%Y%m%d") for day in pd.date_range(start, end, freq="D")]
def get_previous_cycle(date_, str_fmt="%Y%m%d"):
    """
    Return the cycle (valid closing day: 7th/14th/21st/last-of-month) strictly
    before date_. Input/output may be a string (str_fmt) or a datetime.
    """
    as_string = isinstance(date_, str) or isinstance(date_, unicode)
    date_obj = dt.datetime.strptime(date_, str_fmt) if as_string else date_
    day = date_obj.day
    if day <= 7:
        # The previous cycle is the closing day of the previous month.
        prev_cycle = get_last_day_of_month(date_obj + relativedelta(months=-1))
    elif day >= 28:
        prev_cycle = dt.datetime.strptime(
            "{}{:02d}{:02d}".format(date_obj.year, date_obj.month, 21), "%Y%m%d")
    else:
        # Snap down to the nearest multiple of 7 strictly below `day`.
        new_day = date_obj.day - date_obj.day % 7 if date_obj.day % 7 != 0 else (date_obj.day - 7)
        prev_cycle = dt.datetime.strptime(
            "{}{:02d}{:02d}".format(date_obj.year, date_obj.month, new_day), "%Y%m%d")
    return prev_cycle.strftime(str_fmt) if as_string else prev_cycle
def get_next_cycle(date_, str_fmt="%Y%m%d"):
    """
    Return the cycle (valid closing day: 7th/14th/21st/last-of-month) strictly
    after date_. Input/output may be a string (str_fmt) or a datetime.
    """
    as_string = isinstance(date_, str) or isinstance(date_, unicode)
    date_obj = dt.datetime.strptime(date_, str_fmt) if as_string else date_
    if date_obj.day < 21:
        # Snap up to the next multiple of 7 (7, 14 or 21).
        next_day = (int(date_obj.day / 7) + 1) * 7
        next_cycle = dt.datetime.strptime(
            "{}{:02d}{:02d}".format(date_obj.year, date_obj.month, next_day), "%Y%m%d")
    elif get_last_day_of_month(date_) == date_:
        # Already on the month's closing day: jump into the next month.
        next_cycle = get_next_cycle(date_obj + relativedelta(days=+1))
    else:
        next_cycle = get_last_day_of_month(date_obj)
    return next_cycle.strftime(str_fmt) if as_string else next_cycle
def is_cycle(date_, str_fmt="%Y%m%d"):
    '''
    True if date_ is a cycle (valid date to be used as closing_day for car, deliveries, ...)
    :param date_:
    :param str_fmt:
    :return:
    '''
    as_string = isinstance(date_, str) or isinstance(date_, unicode)
    date_obj = dt.datetime.strptime(date_, str_fmt) if as_string else date_
    # A cycle is a fixed point of "previous cycle, then next cycle".
    return get_next_cycle(get_previous_cycle(date_obj)) == date_obj
def move_date_n_yearmonths(yyyymm, n):
    '''
    Move a YYYYMM string n months; positive n moves forward.
    :param yyyymm: str with format YYYYMM
    :param n: number of months to move
    :return: the shifted YYYYMM string
    '''
    if n == 0:
        return yyyymm
    # Anchor on day 01, step 4 cycles per month, then drop the day part.
    return move_date_n_cycles(yyyymm + "01", 4 * n, str_fmt="%Y%m%d")[:6]
def move_date_n_cycles(date_, n, str_fmt="%Y%m%d"):
    '''
    Move date_ by n cycles; positive n moves forward, negative backwards.
    :param date_:
    :param n:
    :param str_fmt:
    :return:
    '''
    if n == 0:
        return date_
    step = get_next_cycle if n > 0 else get_previous_cycle
    moved = date_
    for _ in range(abs(n)):
        moved = step(moved, str_fmt=str_fmt)
    return moved
def move_date_n_days(_date, n, str_fmt="%Y%m%d"):
    """
    Move _date by n days (n may be negative). The result keeps the input type
    (string formatted with str_fmt, or datetime).
    """
    if n == 0:
        return _date
    as_string = isinstance(_date, str) or isinstance(_date, unicode)
    date_obj = dt.datetime.strptime(_date, str_fmt) if as_string else _date
    shifted = date_obj + dt.timedelta(days=n)
    return shifted.strftime(str_fmt) if as_string else shifted
def convert_to_date(dd_str):
    """
    Normalize a yyyymmdd / yyyy-mm-dd / yyyy/mm/dd string to 'YYYY-MM-DD HH:MM:SS'.
    Returns None for None/empty/NaN input and for dates before 1900
    (Teradata-style null dates).
    """
    import datetime as dt
    if dd_str in [None, ""] or dd_str != dd_str:  # dd_str != dd_str catches NaN
        return None
    cleaned = dd_str.replace("-", "").replace("/", "")
    dd_obj = dt.datetime.strptime(cleaned, "%Y%m%d")
    if dd_obj < dt.datetime.strptime("19000101", "%Y%m%d"):
        return None
    return dd_obj.strftime("%Y-%m-%d %H:%M:%S") if dd_str and dd_str == dd_str else dd_str
def count_nb_cycles(date_start, date_end):
    '''
    Return the number of cycles between date_start and date_end.
    If date_start < date_end --> positive number
    If date_start > date_end --> negative number
    :param date_start:
    :param date_end:
    :return:
    '''
    # Normalize so we always walk forward from the smaller date.
    if date_start > date_end:
        sign, lower, upper = -1, date_end, date_start
    else:
        sign, lower, upper = +1, date_start, date_end
    num_cycles = 0
    current = lower
    while current < upper:
        current = move_date_n_cycles(current, n=+1)
        num_cycles += sign
    return num_cycles
def get_next_dow(weekday, from_date=None):
    '''
    Return the next date whose ISO weekday equals `weekday` (1=Monday .. 7=Sunday),
    counting from `from_date` (today when None). If `from_date` itself falls on
    that weekday, it is returned unchanged.
    Note: weekday=0 behaves like weekday=7.
    '''
    start = from_date if from_date else dt.date.today()
    days_ahead = (weekday - start.isoweekday()) % 7
    return start + dt.timedelta(days_ahead)
def get_diff_days(start_date, end_date, format_date="%Y%m%d"):
    '''
    Difference in days (end_date - start_date); positive when end_date is later.
    :param start_date: str date; format_date (YYYYMMDD by default)
    :param end_date: str date; format_date (YYYYMMDD by default)
    :param format_date:
    :return:
    '''
    start_obj = dt.datetime.strptime(start_date, format_date)
    end_obj = dt.datetime.strptime(end_date, format_date)
    return (end_obj - start_obj).days
def is_null_date(fecha):
    """
    Teradata uses year 1753 as its default for null dates; a date whose year
    equals 1753 is therefore treated as null.
    :param fecha: Spark date column
    :return: Column condition that is True when the year is 1753
    """
    YEAR_NULL_TERADATA = 1753
    return year(fecha) == YEAR_NULL_TERADATA
def compute_diff_days(col_name, ref_date_name, null_value=-1):
    """
    Spark column expression with the day difference datediff(ref_date, date),
    or `null_value` when either side is null/empty.
    Both arguments may be column names (str) or Column objects.
    """
    from pyspark.sql.functions import col, when, coalesce, lit, length, datediff
    date_col = col(col_name) if isinstance(col_name, str) else col_name
    ref_col = col(ref_date_name) if isinstance(ref_date_name, str) else ref_date_name
    # length(NULL) is NULL -> coalesce to 0, so both null and '' count as missing.
    missing = (coalesce(length(date_col), lit(0)) == 0) | (coalesce(length(ref_col), lit(0)) == 0)
    return when(missing, null_value).otherwise(datediff(ref_col, date_col).cast("double"))
def get_nth_day_of_next_month(date_, nth = 5, format_date="%Y%m%d"):
    """Return (as YYYYMMDD) the nth day of the month after date_ (a format_date string)."""
    from dateutil.relativedelta import relativedelta
    import datetime as dt
    parsed = dt.datetime.strptime(date_, format_date)
    # relativedelta(months=1, day=nth): next month, pinned to day `nth`.
    return (parsed + relativedelta(months=1, day=nth)).strftime("%Y%m%d")
def get_next_nth_day(date_, nth = 5):
    """Return the next date (YYYYMMDD) falling on day `nth` of a month, strictly after date_."""
    # Candidate: day `nth` of date_'s own month.
    nth_this_month = date_[0:6] + ('0' + str(nth) if nth < 10 else str(nth))
    if get_diff_days(date_, nth_this_month, format_date="%Y%m%d") > 0:
        return nth_this_month
    # Already past (or on) day nth this month: take next month's.
    return get_nth_day_of_next_month(date_, nth = nth)
| [
"carmen.arnau1@vodafone.com"
] | carmen.arnau1@vodafone.com |
f34a9115279a8d7cbc59260fc5acfcab05b942c6 | 0e25dc15ae9efce8bfd716d4d2041da07767968b | /qbench/benchmarks/QLib/OPENQL_converted/benstein_vazirani_48b_secret_4.py | fc296603b534cdd78fb80f43ed23a31bafc0d90d | [] | no_license | alxhotel/crossbar-bench | f608fc0062b4f8a5162ec33d61c0204aaf27b6ff | 3bf7536e7697d29c3089b0ba564ba22d39698b88 | refs/heads/master | 2021-07-13T16:06:50.085838 | 2020-10-04T23:39:05 | 2020-10-04T23:39:05 | 213,409,122 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,382 | py | from openql import openql as ql
import os
import argparse
def circuit(config_file, new_scheduler='yes', scheduler='ASAP', uniform_sched= 'no', sched_commute = 'yes', mapper='base', moves='no', maptiebreak='random', initial_placement='no', output_dir_name='test_output', optimize='no', measurement=True, log_level='LOG_WARNING'):
    """Compile the 48-bit Bernstein-Vazirani circuit (secret string #4) with OpenQL.

    Builds the kernel (H on data qubits + ancilla, oracle CNOT q2 -> q48,
    H again, optional measurements) and compiles it for the given platform.

    :param config_file: path to the OpenQL platform configuration file,
        relative to this script's directory.
    :param measurement: when True, append a measure gate on every qubit.
    The remaining parameters are forwarded to the corresponding OpenQL options.
    """
    curdir = os.path.dirname(__file__)
    output_dir = os.path.join(curdir, output_dir_name)

    ql.set_option('output_dir', output_dir)
    ql.set_option('optimize', optimize)
    ql.set_option('scheduler', scheduler)
    ql.set_option('scheduler_uniform', uniform_sched)
    ql.set_option('mapper', mapper)
    ql.set_option('initialplace', initial_placement)
    ql.set_option('log_level', log_level)
    ql.set_option('scheduler_post179', new_scheduler)
    ql.set_option('scheduler_commute', sched_commute)
    ql.set_option('mapusemoves', moves)
    ql.set_option('maptiebreak', maptiebreak)

    config_fn = os.path.join(curdir, config_file)
    # platform = ql.Platform('platform_none', config_fn)
    platform = ql.Platform('starmon', config_fn)
    sweep_points = [1,2]
    num_circuits = 1
    num_qubits = 50
    p = ql.Program('benstein_vazirani_48b_secret_4', platform, num_qubits)
    p.set_sweep_points(sweep_points, num_circuits)
    k = ql.Kernel('benstein_vazirani_48b_secret_4', platform, num_qubits)

    # Ancilla qubit 48 is prepared in |1> so the oracle phase kicks back.
    k.gate('prepz',[48])
    k.gate('x',[48])
    # Hadamard layer over qubits 0..48 (48 data qubits + ancilla).
    # Replaces 49 hand-written k.gate('h', ...) lines; gate order is unchanged.
    for q in range(49):
        k.gate('h',[q])
    # Oracle for secret 4 = 0b100: only bit 2 feeds the ancilla.
    k.gate('cnot',[2,48])
    # Second Hadamard layer maps the phase pattern back to the secret string.
    for q in range(49):
        k.gate('h',[q])

    if measurement:
        for q in range(num_qubits):
            k.gate('measure', [q])

    p.add_kernel(k)
    p.compile()
    ql.set_option('mapper', 'no')
if __name__ == '__main__':
    # CLI wrapper: every option maps 1:1 onto a circuit() parameter.
    parser = argparse.ArgumentParser(description='OpenQL compilation of a Quantum Algorithm')
    parser.add_argument('config_file', help='Path to the OpenQL configuration file to compile this algorithm')
    parser.add_argument('--new_scheduler', nargs='?', default='yes', help='Scheduler defined by Hans')
    parser.add_argument('--scheduler', nargs='?', default='ASAP', help='Scheduler specification (ASAP (default), ALAP, ...)')
    parser.add_argument('--uniform_sched', nargs='?', default='no', help='Uniform shceduler actication (yes or no)')
    parser.add_argument('--sched_commute', nargs='?', default='yes', help='Permits two-qubit gates to be commutable')
    parser.add_argument('--mapper', nargs='?', default='base', help='Mapper specification (base, minextend, minextendrc)')
    parser.add_argument('--moves', nargs='?', default='no', help='Let the use of moves')
    parser.add_argument('--maptiebreak', nargs='?', default='random', help='')
    parser.add_argument('--initial_placement', nargs='?', default='no', help='Initial placement specification (yes or no)')
    parser.add_argument('--out_dir', nargs='?', default='test_output', help='Folder name to store the compilation')
    parser.add_argument('--measurement', nargs='?', default=True, help='Add measurement to all the qubits in the end of the algorithm')
    args = parser.parse_args()
    try:
        # BUGFIX: --measurement was parsed but never forwarded to circuit();
        # it is now passed through as a keyword argument.
        circuit(args.config_file, args.new_scheduler, args.scheduler, args.uniform_sched,
                args.sched_commute, args.mapper, args.moves, args.maptiebreak,
                args.initial_placement, args.out_dir, measurement=args.measurement)
    except TypeError:
        print('\nCompiled, but some gate is not defined in the configuration file. \nThe gate will be invoked like it is.')
        raise
raise | [
"alxmorais8@msn.com"
] | alxmorais8@msn.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.