blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5d51dcf1897c071819e438b377e20325171f356a | 40b1db3dbd327b0979749812a7388958afd0892c | /Django_practice/DjangoDay3/register_app/views.py | 7395203915c298117737fd8fd9b40ae26156c8a3 | [] | no_license | lucool/project | 91da0255a739b8464c415347c30d5aea69588dee | f2136f7435e817e057403d968b8eb70ddad889be | refs/heads/master | 2023-03-09T01:27:03.633168 | 2021-02-27T10:37:33 | 2021-02-27T10:37:33 | 340,904,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | from django.shortcuts import render, redirect
from django.urls import reverse
def register_view(request):
    """Registration page: GET renders the form, POST redirects to the
    success page, passing the submitted user name via the URL."""
    if request.method == "GET":
        return render(request,'register_app/register.html')
    elif request.method == "POST":
        regname = request.POST.get("regname")
        # NOTE(review): regpwd is read but never used or persisted -- confirm intent.
        regpwd = request.POST.get("regpwd")
        #return redirect(reverse("reg:suc",args=(regname,))) # redirect + reverse URL resolution (positional args)
        return redirect(reverse("reg:suc", kwargs={"username":regname})) # redirect + reverse URL resolution
def success_view(request, username):
    """Registration success page.

    ``locals()`` hands the view's local names (``request``, ``username``)
    to the template context, so the template can display the user name.
    """
    # Fix: stray dataset-extraction residue that had been fused onto this
    # line was removed; it was not valid Python.
    return render(request, 'register_app/success.html', locals())
"lu2015594025@163.com"
] | lu2015594025@163.com |
00be56f3a2d4c9c95afe72cadc2bad2c4cbd94a1 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /23htQEtZobC8cfwcm_2.py | 360e158c04b47b25a149be2dda61466f3f503056 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py |
def canConcatenate(lst, target):
    """Return True if the sublists of *lst*, concatenated together,
    contain exactly the same elements as *target*.

    Order is irrelevant: both sides are compared as sorted multisets,
    so duplicates must match in count as well as value.
    """
    # Flatten one level of nesting with a comprehension instead of the
    # original nested append loop, then compare sorted multisets.
    flattened = [item for sub in lst for item in sub]
    return sorted(flattened) == sorted(target)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
1e6cbbc5dc6cb0a08716478087f3334a5456edb6 | dff19df8f651fcac7afa53cccd83f90aeef85772 | /account/migrations/0001_initial.py | fd72fc22d8d8b791d84243bd40bb47ee901e9057 | [] | no_license | felipefoc/Django-ToDo | fb6631625fe8d8189841fc43519a727b3fd69ee5 | c15509c22fa7dc9c01ec7b760535becfa0d21b75 | refs/heads/master | 2023-01-23T09:54:56.980485 | 2020-11-22T21:00:01 | 2020-11-22T21:00:01 | 311,521,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | # Generated by Django 3.1.3 on 2020-11-16 04:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the ``account`` app: creates the Task table."""
    # First migration of this app.
    initial = True
    dependencies = [
        # Task.user points at the project's (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('tasktext', models.TextField(max_length=200)),
                # Creation timestamp is set once and not editable afterwards.
                ('create_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('is_active', models.BooleanField(default=True)),
                # NOTE(review): blank=True without null=True on a DateTimeField
                # still requires a value at the database level -- confirm.
                ('ended_date', models.DateTimeField(blank=True)),
                ('obs', models.TextField()),
                # Deleting a user cascades to their tasks; reverse accessor is user.task.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['id'],
            },
        ),
    ]
| [
"felipemfmayer@gmail.com"
] | felipemfmayer@gmail.com |
3ce0277d9ac32d4776f25bf49f8feb9b9d398fcb | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/54/usersdata/102/23160/submittedfiles/av1_p2_civil.py | 25adbefb3c5ac9e4e683f29149470bf2394ec2ea | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | # -*- coding: utf-8 -*-
from __future__ import division
n = input('Digite o valor de n:') # Python 2 input(): evaluates the typed text to a number
i = 1
cont = 0
# Count how many odd integers lie in the half-open interval [1, n).
while i<n:
    if i%2==1:
        cont = cont + 1
    i = i + 1
print (cont)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
dfc8858b3e9adf2feaecf24069bb1764bce3df59 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/spacecomponents/server/components/bountyEscrow/persister.py | 6306404cfd75f08055edba12151d71291620eb72 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 1,523 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\spacecomponents\server\components\bountyEscrow\persister.py
class Persister(object):
    """Write-behind persistence for one bounty-escrow space component.

    State is flushed to the database only after MarkDirty() has been
    called, via PersistEverything().

    NOTE(review): this is Python 2 style code (``dict.iteritems``); it
    will not run unmodified on Python 3.
    """
    def __init__(self, solarSystemID, itemID, dbspacecomponent):
        # Dirty flag: nothing is written until MarkDirty() is called.
        self.isDirty = False
        self.itemID = itemID
        self.solarSystemID = solarSystemID
        self.dbspacecomponent = dbspacecomponent
    def _GetBountiesParameters(self, bountyContributors):
        """Flatten {charID: isk} into two parallel comma-separated strings
        (character ids, bounty amounts) for the DB procedure call."""
        charIDs = []
        bounties = []
        for charID, isk in bountyContributors.iteritems():
            charIDs.append(str(charID))
            bounties.append(str(isk))
        charIDsAsString = ','.join(charIDs)
        bountiesAsString = ','.join(bounties)
        return (charIDsAsString, bountiesAsString)
    def PersistEverything(self, bountyContributors, bountyBonus):
        """Persist the bonus and contributor bounties; no-op unless dirty."""
        if not self.isDirty:
            return
        bountyArgs = self._GetBountiesParameters(bountyContributors)
        self.dbspacecomponent.BountyEscrow_PersistState(self.solarSystemID, self.itemID, bountyBonus, *bountyArgs)
        # State is now in sync with the database.
        self.isDirty = False
    def GetStateForSystem(self):
        """Load and return (bonus, {characterID: iskValue}) from the DB."""
        bonusRows, bounties = self.dbspacecomponent.BountyEscrow_GetState(self.solarSystemID, self.itemID)
        if not bonusRows:
            # No stored bonus row means no accumulated escrow bonus.
            bonus = 0.0
        else:
            bonus = bonusRows[0].bountyEscrowBonus
        return (bonus, {r.characterID:r.iskValue for r in bounties})
    def MarkDirty(self):
        """Flag that in-memory state diverged from the DB and needs a flush."""
        self.isDirty = True
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
147ac53fdb17b96dc9059fa2ee3c3ac89248b368 | 10ed1072d48185756f9630774b42aaa513dd05d5 | /core/models.py | ef81db43b995d606518e0159cf0c894e8d487b4d | [] | no_license | carlcyh2017/SchoolSys | 4a2f67a620b6c8918127cd018f811592aaf5e593 | 8ecbe01b1b3ab75ac4f51ab3d20768de28f2ed4d | refs/heads/master | 2021-09-12T15:40:21.135902 | 2018-04-18T06:27:39 | 2018-04-18T06:27:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,099 | py | # -*- coding: utf-8 -*- @Time : 18-1-17 下午12:57
# @Author : QiHanFang @Email : qihanfang@foxmail.com
import pickle
import re
from random import randint
from conf.settings import *
from lib.utils import create_id, hash_pwd
from core.logger import log_generate
class Base:
    """Common persistence helpers inherited by all model classes.

    Subclasses must define a ``save_path`` class attribute naming the
    directory where instances are pickled, one file per object, with the
    object's unique ``id_`` as the filename.
    """
    def __init__(self, name):
        # Unique identifier; doubles as the on-disk filename.
        self.id_ = create_id()
        self.name = name

    def save(self):
        """Pickle this object into ``save_path`` under its id."""
        file_path = r'%s/%s' % (self.save_path, self.id_)
        with open(file_path, 'wb') as f:
            pickle.dump(self, f)

    @classmethod
    def get_all_objects(cls):
        """Load and return every pickled instance of this class."""
        obj_lst = []
        for obj_id in os.listdir(cls.save_path):
            # Fix: use a context manager so the file handle is closed
            # (the original leaked handles via pickle.load(open(...))).
            with open('%s/%s' % (cls.save_path, obj_id), 'rb') as f:
                obj_lst.append(pickle.load(f))
        return obj_lst

    def __str__(self):
        """Return one ``attribute<padding>value`` line per instance attribute."""
        res = ''
        for key, value in self.__dict__.items():
            # Fix: str() the value -- attributes such as ages or flags may
            # be non-string, which previously raised TypeError here.
            res += key.ljust(20) + str(value) + '\n'
        return res

    @classmethod
    def _find_by_attr(cls, attr, wanted):
        """Return the first stored instance whose *attr* equals *wanted*,
        or None. Shared implementation for the two public lookups."""
        for obj in cls.get_all_objects():
            if getattr(obj, attr) == wanted:
                return obj
        return None

    @classmethod
    def get_obj_by_name(cls, name):
        """Return the stored instance with the given name, or None."""
        return cls._find_by_attr('name', name)

    @classmethod
    def get_obj_by_id(cls, id_str):
        """Return the stored instance with the given id, or None."""
        return cls._find_by_attr('id_', id_str)
class School(Base):
    """One campus of the school; entry point for introducing courses,
    opening classes and hiring teachers on that campus."""
    save_path = SCHOOL_PATH
    def __init__(self):
        """Interactively create a new campus (city + street address)."""
        if DEBUG:
            print('in School')
        print()
        print('创办学校中'.center(20, '-'))
        school_lst = School.get_all_objects()
        while True:
            # 1. city name (must be in the planned-cities list)
            city = input('城市名称(q.返回主界面) >>> ').strip()
            if city == 'q':
                return
            if city not in MAIN_CITIES:
                print('\033[1;35m 对不起,该城市不在规划内 \033[0m')
            else:
                school_city_lst = [school.city for school in school_lst]
                # BUGFIX: the original tested ``city not in school_city_lst``
                # here, which rejected every *new* city and only let
                # duplicates through. A campus must be refused only when
                # the city already has one.
                if city in school_city_lst:
                    print('\033[1;35m 对不起,该城市已经存在分校区 \033[0m')
                else:
                    # 2. street address
                    address = input('学校地址 >>> ').strip()
                    # initialise the Base attributes (id_ and name)
                    super().__init__('%s分校区' % city)
                    self.address = address
                    self.city = city
                    # 3. persist the new campus via Base.save()
                    self.save()
                    print('学校建成'.center(20, '-'))
                    log_generate(log_type='admin', id='admin', message={
                        'type': '创办新校区', '城市': city, })
                    return
    def __str__(self):
        return '学校名称<{0}> 地址<{1}> {2}'.format(self.name, self.address, self.__dict__)
    def start_course(self):
        """Interactively introduce a new course on this campus."""
        Course(self)
    def start_class(self):
        """Interactively open a new class on this campus."""
        Class(self)
    def display_school_courses(self):
        """Print a table of every course offered by this campus."""
        course_lst = self.school_courses
        if not course_lst:
            print('\033[1;35m 目前没有引进任何课程 \033[0m')
            return
        print('%s已开设课程如下'.center(30, '-') % self.name)
        print('name'.ljust(9), 'month'.ljust(8), 'price')
        for course in course_lst:
            print(str(course.name).ljust(9), str(course.learning_time).ljust(8), course.price)
        print(''.center(30, '-'))
    def display_school_classes(self):
        """Print a table of every class opened on this campus."""
        if not self.school_classes:
            print('\033[1;35m该学校目前还未开班 \033[0m')
        else:
            print()
            print('校区班级列表'.center(20, '-'))
            print('%s已开设课程如下'.center(30, '-') % self.name)
            print('NAME'.ljust(14), 'MONTH'.ljust(8), 'TEACHER')
            for school_class in self.school_classes:
                # Resolve the pickled ids back to course/teacher objects.
                class_course = Course.get_obj_by_id(school_class.course_id)
                class_teacher = Teacher.get_obj_by_id(school_class.teacher_id)
                print(str(school_class.name).ljust(14), class_course.name.ljust(8), class_teacher.name)
            print(''.center(20, '-'))
    def display_school_teachers(self):
        """Print a table of every teacher employed by this campus."""
        school_teacher_lst = self.school_teachers
        if not school_teacher_lst:
            print('\033[1;35m目前未招收教师\033[0m')
            return
        else:
            print('%s的教师信息如下'.center(30, '-') % self.name)
            print('NAME'.ljust(10), 'GOOD_AT'.ljust(10), 'EXPERIENCE(year)')
            for school_teacher in school_teacher_lst:
                print(str(school_teacher.name).ljust(10),
                      str(school_teacher.teaching_course).ljust(10), int(school_teacher.teaching_years))
            print(''.center(20, '-'))
    @property
    def school_courses(self):
        """Return the list of Course objects belonging to this campus."""
        course_lst = []
        course_id_lst = os.listdir(Course.save_path)
        for course_id in course_id_lst:
            course = pickle.load(open('%s/%s' % (Course.save_path, course_id), 'rb'))
            if course.school_id == self.id_:
                course_lst.append(course)
        return course_lst
    @property
    def school_classes(self):
        """Return the list of Class objects opened on this campus."""
        school_class_lst = []
        school_class_id_lst = os.listdir(Class.save_path)
        for school_class_id in school_class_id_lst:
            school_class = pickle.load(open('%s/%s' % (Class.save_path, school_class_id), 'rb'))
            if school_class.school_id == self.id_:
                school_class_lst.append(school_class)
        return school_class_lst
    @property
    def school_teachers(self):
        """Return the list of Teacher objects employed by this campus.

        BUGFIX: always returns a list now. The original returned None
        when the campus had no teachers, which crashed callers that
        iterate the result (e.g. Class.__init__).
        """
        school_teacher_lst = []
        for teacher_id in os.listdir(Teacher.save_path):
            teacher = pickle.load(open('%s/%s' % (Teacher.save_path, teacher_id), 'rb'))
            if teacher.school_id == self.id_:
                school_teacher_lst.append(teacher)
        if not school_teacher_lst:
            print('对不起,本学校目前还未招收老师')
        return school_teacher_lst
    @property
    def school_students(self):
        """Return the list of Student objects registered on this campus."""
        school_student_lst = []
        for student_id in os.listdir(Student.save_path):
            school_student = pickle.load(open('%s/%s' % (Student.save_path, student_id), 'rb'))
            if school_student.school_id == self.id_:
                school_student_lst.append(school_student)
        return school_student_lst
class Teacher(Base):
    '''A teacher employed by one campus; recruited interactively.'''
    save_path = TEACHER_PATH
    def __init__(self, school):
        print()
        print('招聘讲师中'.center(20, '-'))
        while True:
            # 1. course the candidate specialises in
            teaching_course = input('擅长课程(退出:q)>>> ').strip()
            if teaching_course == 'q':
                return
            if teaching_course not in HOT_COURSES : # only hire teachers for the planned course catalogue
                print("对不起,您的课程不符合招聘要求['python', 'linux', 'go', 'java', 'php', 'c', 'c++']")
            else:
                # 2. years of teaching experience
                teaching_years = input('经验(年) >>> ').strip()
                if not teaching_years.isdigit() or int(teaching_years) not in range(1, 50):
                    # NOTE(review): the message demands >= 2 years but the
                    # check above accepts 1 year -- confirm the intended minimum.
                    print('\033[1;35m对不起,我们招聘的教师至少需要2年工作经验 \033[0m')
                else:
                    while True:
                        # 3. name (must be unique among this campus's teachers)
                        name = input('姓名(不能为空, 退出:q) >>> ').strip()
                        if name == 'q':
                            return
                        if school.school_teachers:
                            teacher_name_lst = [teacher.name for teacher in school.school_teachers if school.school_teachers]
                            if name in teacher_name_lst:
                                print('\033[1;35m 对不起,该教师已经招聘 \033[0m')
                                continue
                        # 4. age and gender
                        age = input('年龄(数字) >>> ').strip()
                        gender = input('性别(男|女) >>> ').strip()
                        if not name or not age.isdigit() or gender not in ('男', '女'):
                            print('\033[1;35m姓名或性别输入有误\033[0m')
                        else:
                            while True:
                                # 5. login password (hashed before storing)
                                login_pwd = input('请输入您的登录密码(至少六位数)>>> ').strip()
                                if len(login_pwd) < 6 or not login_pwd.isalnum():
                                    print('\033[1;35m密码至少需要六位字母或数字 \033[0m')
                                else:
                                    # 6. assign attributes and persist to TEACHER_PATH
                                    super().__init__(name)
                                    self.login_pwd = hash_pwd(login_pwd)
                                    self.school_id = school.id_
                                    self.age = age
                                    self.gender = gender
                                    self.teaching_course = teaching_course
                                    self.teaching_years = teaching_years
                                    print('招聘讲师成功'.center(20, '-'))
                                    self.save()
                                    log_generate(log_type='admin', id='admin', message={
                                        'type': '招收教师', '教师姓名': name, '性别': gender,
                                        '教授课程': teaching_course, '经验(年)': int(teaching_years)})
                                    return
class Course(Base):
    '''A course offered by one campus; introduced interactively.'''
    save_path = COURSE_PATH
    def __init__(self, school):
        print()
        print('引进课程中'.center(20, '-'))
        while True:
            # 1. course name
            course_name = input('课程名(退出q) >>> ').strip()
            if course_name == 'q':
                return
            if course_name not in HOT_COURSES:
                # only courses from the planned catalogue may be introduced
                print('\033[1;35m此课程不在规划范围内 \033[0m')
            else:
                if course_name in [course.name for course in school.school_courses]: # reject duplicates on this campus
                    print('\033[1;35m 对不起,课程<%s>本校区已经创建 \033[0m' % course_name)
                else:
                    # 2. duration (months) and price
                    learning_time = input('学习时长(月) >>> ').strip()
                    price = input('收费(元) >>> ').strip()
                    if not learning_time.isdigit() or not price.isdigit(): # both must be plain digits
                        print('\033[1;35m时间或价格输入有误 \033[0m')
                    else:
                        if int(learning_time) not in range(1, 13):
                            # NOTE(review): range(1, 13) accepts 1-12 months
                            # while the message says 1-13 -- confirm intent.
                            print('\033[1;35m学习时长应该保持在1-13个月 \033[0m')
                        else:
                            # 3. assign attributes and persist to COURSE_PATH
                            super().__init__(course_name)
                            self.school_id = school.id_
                            self.learning_time = learning_time
                            self.price = price
                            print('课程引进成功'.center(20, '-'))
                            print()
                            self.save()
                            log_generate(log_type='admin', id='admin', message={
                                'type': '课程引进', '课程名': course_name, '课程时长': learning_time})
                            return
class Class(Base):
    """A class (group of students) on one campus, bound to one course and
    one teacher; opened interactively."""
    save_path = CLASS_PATH
    def __init__(self, school):
        print()
        print('班级创建中'.center(20, '-'))
        if DEBUG:
            print('in Class')
        self.school_id = school.id_
        while True:
            if not school.school_courses:  # the campus must offer at least one course
                print('\033[1;35m 本小区目前未引进任何课程 \033[0m')
                return
            school.display_school_courses()
            # 1. bind the class to an existing course
            course_name = input('请选择课程名称 >>> ').strip()
            if course_name not in [course.name for course in school.school_courses]:
                print('\033[1;35m该课程未引进,请重新选择\033[0m')
            else:
                self.course_id = Course.get_obj_by_name(course_name).id_
                # collect the teachers on this campus who teach the course
                course_teachers_lst = []
                for school_teacher in school.school_teachers:
                    if school_teacher.teaching_course == course_name:
                        course_teachers_lst.append(school_teacher)
                if not course_teachers_lst:
                    print('\033[1;35m对不起,目前没有招收此课程的教师\033[0m')
                    return
                school.display_school_teachers()
                while True:
                    # 2. pick the class teacher by name
                    teacher_name = input('选择教师 >>> ').strip()
                    # Look the teacher up once instead of re-reading the
                    # pickle directory three times as the original did.
                    teacher = Teacher.get_obj_by_name(teacher_name)
                    if not teacher:
                        print('\033[1;35m 教师姓名输入有误 \033[0m')
                    else:
                        if teacher.teaching_course == course_name:
                            self.teacher_id = teacher.id_
                            class_name = input('输入班级名称>>> ').strip()
                            if class_name not in [class_.name for class_ in school.school_classes]:
                                super().__init__(class_name)
                                print('班级创建成功'.center(20, '-'))
                                print()
                                self.save()
                                log_generate(log_type='admin', id='admin', message={
                                    'type': '成立班级', '课程名': course_name, '班级名': class_name, '班级教师': teacher_name})
                                return
                            else:
                                print('\033[1;35m 班级名重复 \033[0m')
                        else:
                            print('\033[1;35m您选择的教师不擅长本班级课程 \033[0m')
    @property
    def class_students_info(self):
        """Return the list of *active* students enrolled in this class.

        BUGFIX: always returns a list now. The original returned None
        when no students existed at all, which raised TypeError in
        callers that iterate the result (duplicate-name check during
        student registration).
        """
        student_lst = Student.get_all_objects()
        class_students_lst = []
        if not student_lst:
            print('本校区目前没有招收学生')
            return class_students_lst
        for student in student_lst:
            if student.class_id == self.id_ and student.active == 1:
                class_students_lst.append(student)
        return class_students_lst
class Student(Base):
    """A student registered into one class on one campus; created
    interactively. New students start inactive (``active = 0``)."""
    save_path = STUDENT_PATH # class attribute: directory where instances are pickled
    def __init__(self):
        print('\n'+'注册中'.center(20, '-'))
        # load every existing campus
        school_lst = School.get_all_objects()
        if not school_lst:
            print('\033[1;35m 对不起,目前没有校区 \033[0m')
            return
        while True:
            print()
            print('分校列表'.center(30, '-'))
            print('学校名称'.ljust(10), '地址'.ljust(15))
            for school in school_lst:
                print(str(school.name).ljust(10), str(school.address).ljust(15))
            # 1. choose a campus
            school_name = input('请选择学校(退出:q)>>> ').strip()
            if school_name == 'q': # quit back to the main menu
                return
            if not School.get_obj_by_name(school_name): # validate the campus name
                print('\033[1;35m 学校名称输入有误,请重新输入 \033[0m')
            else:
                # 2. fetch the campus object
                school = School.get_obj_by_name(school_name)
                # 3. the campus must have opened at least one class
                if not school.school_classes:
                    print('\033[1;35m 对不起,当前学校没有创建班级 \033[0m')
                else:
                    self.school_id = school.id_
                    school.display_school_classes() # show the class list
                    # names of every class on this campus
                    class_name_lst = [class_.name for class_ in school.school_classes] # class-name list
                    while True:
                        # 4. choose the class to join
                        class_name = input('请选择班级 >>> ').strip()
                        if class_name not in class_name_lst:
                            print('\033[1;35m 班级名输入有误,请重新输入 \033[0m')
                        else:
                            # 5. fetch the class object by name
                            class_ = Class.get_obj_by_name(class_name)
                            if not class_:
                                print('\033[1;35m 班级名称有误,请重新输入 \033[0m')
                            else:
                                while True:
                                    # 6. choose a login password (stored hashed)
                                    login_pwd = input('请设置登录密码(至少六位数)>>> ').strip()
                                    if len(login_pwd) < 6 or not login_pwd.isalnum():
                                        print('\033[1;35m密码至少需要六位字母或数字 \033[0m')
                                    else:
                                        self.login_pwd = hash_pwd(login_pwd)
                                        self.active = 0
                                        self.class_id = Class.get_obj_by_name(class_name).id_
                                        # 7. student name
                                        name = input('输入姓名(中文名字优先) >>> ').strip()
                                        # reject duplicate names within the class
                                        student_name_lst = [stu.name for stu in class_.class_students_info]
                                        if name in student_name_lst:
                                            print('\033[1;35m 学生姓名重复 \033[0m')
                                        else:
                                            # 8. age, gender and remaining details
                                            age = input('年龄(必须为数字) >>> ').strip()
                                            gender = input('性别(男|女) >>> ').strip()
                                            if not name or gender not in ('男', '女') or not age.isdigit():
                                                print('\033[1;35m 名字,性别(male|female)或者年龄输入有误 \033[0m', end='\n\n')
                                            else:
                                                super().__init__(name)
                                                self.age = age
                                                self.gender = gender
                                                while True:
                                                    # 9. contact phone number
                                                    # NOTE(review): the pattern should be a raw string
                                                    # (r'1[358]\d{9}') and is unanchored at the end.
                                                    mobile = input('联系方式 >>> ').strip()
                                                    if not re.match('1[358]\d{9}', mobile):
                                                        print('\033[1;35m电话格式有误 \033[0m')
                                                    else:
                                                        self.mobile = mobile
                                                        address = input('请输入您的住址>>> ').strip()
                                                        if not address:
                                                            print('\033[1;35m住址不能为空 \033[0m')
                                                        else:
                                                            self.address = address
                                                            print('%s同学,恭喜您注册成功!' % name)
                                                            print()
                                                            self.save()
                                                            # NOTE(review): 'course' is logged as self.name (the
                                                            # student's own name); looks like a bug -- confirm.
                                                            log_generate(log_type='student', id=self.id_,
                                                                         message={'type': '注册', 'name': self.name,
                                                                                  'school': school.name,
                                                                                  'course': self.name,
                                                                                  'class': class_name,
                                                                                  'address': address})
                                                            return
    @property
    def final_exam_result(self):
        # Returns a fresh random score (60-100) on *every* access; the
        # result is not stored on the instance.
        exam_result = randint(60, 100)
        return exam_result
if __name__ == '__main__':
    # Ad-hoc manual smoke test: load every pickled Teacher object.
    l = Teacher.get_all_objects()
    # for teacher in l:
    #     print(teacher.name, teacher.teaching_course, School.get_obj_by_id(teacher.school_id))
"qihanfang@foxmail.com"
] | qihanfang@foxmail.com |
8ca9949f60472c84308104b755239a122b758f5e | 18b977dccd70e9e5a1b553b28ab0413fb3f54f4b | /SoftUni/Python Developmen/Python-Basics/4_Conditional_Statements/fishing_boat.py | 7f0dbb468fa2c040c471a83c952bd44aa16c25e3 | [] | no_license | stevalang/Coding-Lessons | 7203e3a18b20e33e8d596e3dfb58d26c50b74530 | 2d0060c2268ad966efdcae4e6e994ac15e57243a | refs/heads/master | 2023-06-05T08:28:33.290530 | 2021-06-16T19:37:29 | 2021-06-16T19:37:29 | 284,852,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | budget = int(input())
# --- input (budget is read on the first line, above) --------------------
season = input()
fishermen_count = int(input())

# Base rental price of the ship per season. The original duplicated this
# season branch inside each of the three crew-size branches; any season
# string other than Spring/Summer/Autumn falls through to the Winter price,
# exactly as before.
if season == 'Spring':
    base_price = 3000
elif season == 'Autumn' or season == 'Summer':
    base_price = 4200
else:
    base_price = 2600

# Group-size discount: 10% up to 6 people, 15% up to 11, 25% for 12+.
if fishermen_count <= 6:
    price = base_price * 0.9
elif fishermen_count <= 11:
    price = base_price * 0.85
else:
    price = base_price * 0.75

# Extra 5% off for an even-sized crew, except in autumn.
# (The original had a dead ``else: price = price`` branch here.)
if fishermen_count % 2 == 0 and season != 'Autumn':
    price *= 0.95

difference = abs(budget - price)
if budget >= price:
    print(f'Yes! You have {difference:.2f} leva left.')
else:
    print(f'Not enough money! You need {difference:.2f} leva.')
| [
"rbeecommerce@gmail.com"
] | rbeecommerce@gmail.com |
64a61209a7c3321452f4445dd94ad549ba4eb752 | e8f7993403776ff414b370f9125891eecfe109ac | /dcn/simplesites/__init__.py | 55022ddb772e9c8d51819bd2f4613801b58f5b5f | [] | no_license | smcmahon/dcn.simplesites | db6d7524ed50bf6ad6601707ee4a4fd83edd1cf3 | aa19585e1bb75cf61cccaa08f711346600b52f23 | refs/heads/master | 2016-09-05T11:21:44.737796 | 2013-06-17T23:13:38 | 2013-06-17T23:13:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,921 | py | from zope.i18nmessageid import MessageFactory
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
# Set up the i18n message factory for our package
MessageFactory = MessageFactory('dcn.simplesites')
# token, value, title
# tokens are what's saved;
# values are the html option values
# titles are presented to user;
# if value is missing, it is taken from token
# if title is missing, it is taken from value
# Site-layout ("skin") choices offered to site admins; the stored token is
# the Plone theme name, the short value is used as the HTML option value.
skins_vocab = SimpleVocabulary([
    SimpleTerm('Sunburst Theme', 'flex', title=u'Flexible Layout'),
    SimpleTerm('Plone Classic Theme', 'fixed', title=u'Fixed Layout'),
    ])
# Content-license choices; tokens are the Creative Commons short codes and
# key into the license_display HTML snippets defined below in this module.
license_vocab = SimpleVocabulary([
    SimpleTerm('None', title=u'None'),
    SimpleTerm('CC BY', title=u'Creative Commons Attribution'),
    SimpleTerm('CC BY-ND', title=u'Creative Commons Attribution, No-Derivatives'),
    SimpleTerm('CC BY-SA', title=u'Creative Commons Attribution, Share-Alike'),
    SimpleTerm('CC BY-NC', title=u'Creative Commons Attribution, Non-Commercial'),
    SimpleTerm('CC BY-NC-ND', title=u'Creative Commons Attribution, Non-Commercial, No-Derivatives'),
    SimpleTerm('CC BY-NC-SA', title=u'Creative Commons Attribution, Non-Commercial, Share-Alike'),
    ])
# Maps a license token (see license_vocab) to the Creative Commons badge +
# attribution HTML rendered for the chosen license.
license_display = {
    'None': u'',
    'CC BY': """<a rel="license" href="http://creativecommons.org/licenses/by/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/3.0/us/deed.en_US">Creative Commons Attribution 3.0 United States License</a>.""",
    'CC BY-ND': """<a rel="license" href="http://creativecommons.org/licenses/by-nd/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-nd/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nd/3.0/us/deed.en_US">Creative Commons Attribution-NoDerivs 3.0 United States License</a>.""",
    'CC BY-SA': """<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-sa/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/us/deed.en_US">Creative Commons Attribution-ShareAlike 3.0 United States License</a>.""",
    'CC BY-NC': """<a rel="license" href="http://creativecommons.org/licenses/by-nc/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-nc/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/3.0/us/deed.en_US">Creative Commons Attribution-NonCommercial 3.0 United States License</a>.""",
    'CC BY-NC-ND': """<a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-nd/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/3.0/us/deed.en_US">Creative Commons Attribution-NonCommercial-NoDerivs 3.0 United States License</a>.""",
    # BUGFIX: this entry previously duplicated the BY-NC-ND badge, links and
    # wording; it now points at the Attribution-NonCommercial-ShareAlike license.
    'CC BY-NC-SA': """<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/us/deed.en_US"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/us/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/us/deed.en_US">Creative Commons Attribution-NonCommercial-ShareAlike 3.0 United States License</a>.""",
    }
site_credit = """This site provided with the assistance of the <a href="http://www2.dcn.org/dcn">Davis Community Network</a>.""" | [
"steve@dcn.org"
] | steve@dcn.org |
d18d477979825923900b12a742b7dc51c68e53ed | d21071464bef4f3fd51e554f280418d06975a77e | /leetcode/146 LRU cache.py | 366cc00adde6ee948f057550ffc92ccb472c8ca7 | [] | no_license | DeshErBojhaa/sports_programming | ec106dcc24e96231d447cdcac494d76a94868b2d | 96e086d4ee6169c0f83fff3819f38f32b8f17c98 | refs/heads/master | 2021-06-13T19:43:40.782021 | 2021-03-27T14:21:49 | 2021-03-27T14:21:49 | 164,201,394 | 1 | 0 | null | 2019-08-27T22:21:26 | 2019-01-05T09:39:41 | C++ | UTF-8 | Python | false | false | 1,768 | py | from collections import deque
class Node:
    """Doubly-linked-list node holding one cache key."""

    def __init__(self, key):
        # Store the key; both neighbour links start detached.
        self.k = key
        self.prev = None
        self.next = None
class LRUCache:
    """Fixed-capacity least-recently-used cache (LeetCode 146).

    A dict gives O(1) key lookup while a doubly linked list with head and
    tail sentinels keeps entries ordered from most- to least-recently
    used; both ``get`` and ``put`` therefore run in O(1).
    """

    class _Node:
        """Internal list node: the key only; values live in the dict."""
        __slots__ = ('k', 'prev', 'next')

        def __init__(self, key):
            self.k = key
            self.prev = None
            self.next = None

    def __init__(self, capacity: int):
        self.N = capacity
        # Sentinels remove all edge cases at the ends of the list.
        self.head = self._Node(None)
        self.tail = self._Node(None)
        self.head.next = self.tail
        self.tail.prev = self.head
        self.d = {}            # key -> value
        self.node_cache = {}   # key -> list node

    def _unlink(self, nd):
        """Splice *nd* out of the linked list."""
        nd.prev.next = nd.next
        nd.next.prev = nd.prev

    def _push_front(self, nd):
        """Insert *nd* right after the head sentinel (most recently used)."""
        first = self.head.next
        nd.prev = self.head
        nd.next = first
        first.prev = nd
        self.head.next = nd

    def get(self, key: int) -> int:
        """Return the value for *key* (marking it most recently used),
        or -1 if the key is absent."""
        if key not in self.d:
            return -1
        nd = self.node_cache[key]
        # The original duplicated this unlink/relink sequence inline in
        # both get() and put(); it is factored into the helpers above.
        self._unlink(nd)
        self._push_front(nd)
        return self.d[key]

    def put(self, key: int, value: int) -> None:
        """Insert or update *key*; evicts the least-recently-used entry
        when the cache is full. No-op for non-positive capacity (the
        original crashed with KeyError on the sentinel key there)."""
        if self.N <= 0:
            return
        if key in self.d:
            self.d[key] = value
            nd = self.node_cache[key]
            self._unlink(nd)
            self._push_front(nd)
            return
        if len(self.d) == self.N:
            # Evict the node just before the tail sentinel (the LRU entry).
            lru = self.tail.prev
            self._unlink(lru)
            del self.d[lru.k]
            del self.node_cache[lru.k]
        nd = self._Node(key)
        self._push_front(nd)
        self.d[key] = value
        self.node_cache[key] = nd
| [
"noreply@github.com"
] | DeshErBojhaa.noreply@github.com |
4ee99e8dc62e8235506617820b0213baa4574f24 | e81a351d6049a05b92925c9b781d07701345713c | /Language Proficiency/Python/betweenTwoSets.py | c7fbb312611315069616c8b210a93f70eaa99062 | [] | no_license | dmiruke/HackerRank-Python | 1b2fa331d78ce0b4b23628d081441ca008150bd7 | 86b42458cae43e83dbd339665b31926eca0feacd | refs/heads/master | 2022-01-13T22:16:27.338429 | 2019-07-16T08:00:13 | 2019-07-16T08:00:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | #!/bin/python3
from math import gcd
#
# Complete the getTotalX function below.
#
def getTotalX(a, b):
    """Count the integers that are multiples of every element of *a* and
    divisors of every element of *b* (HackerRank "Between Two Sets").
    """
    # Least common multiple of a: every candidate must be a multiple of it.
    # Use floor division (//) -- the original's int(lo*i/gcd(lo, i)) went
    # through float division, which loses precision for large values.
    lo = a[0]
    for v in a[1:]:
        lo = lo * v // gcd(lo, v)
    # Greatest common divisor of b: every candidate must divide it.
    hi = b[0]
    for v in b[1:]:
        hi = gcd(hi, v)
    # Candidates are the multiples of lo up to hi that divide hi evenly
    # (hi % m == 0 replaces the original's indirect gcd(m, hi) == m test).
    return sum(1 for m in range(lo, hi + 1, lo) if hi % m == 0)
# HackerRank driver: read "n m" (array sizes, not otherwise used), then the
# two integer arrays, and print the answer.
nm = input().split()
n = int(nm[0])
m = int(nm[1])
a = list(map(int, input().rstrip().split()))
b = list(map(int, input().rstrip().split()))
print(getTotalX(a, b))
| [
"atharvapusalkar18@gmail.com"
] | atharvapusalkar18@gmail.com |
dddb37bcbb0fedac09f6b7b4160c9dec9062cb51 | 471b464cd4ec351cb8eb0918ee3658ab22438e47 | /test/test_edict.py | 396a58d7d6708f845f60cffee64a89d131c64da1 | [] | no_license | patarapolw/edictreader | 39c03d551fcc9f38315450ec9fb223e9134455a8 | d1dda8c079b443f66d851fafa9f536b6b0d9453a | refs/heads/master | 2021-04-06T11:01:16.941063 | 2018-03-16T05:32:16 | 2018-03-16T05:32:16 | 125,215,915 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | from time import time
from edictreader.dict import Edict2
def test_search_japanese():
start = time()
d = Edict2()
end = time()
# for item in d:
# print(item)
start2 = time()
print(list(d.search({'japanese': '鼹鼠'})))
end2 = time()
print('__init__() takes {:.4f} seconds'.format(end - start))
print('search() takes {:.4f} seconds'.format(end2 - start2))
if __name__ == '__main__':
test_search_japanese()
| [
"patarapolw@gmail.com"
] | patarapolw@gmail.com |
becfe27bbbe6105ff91fa15958ab1ee1894380bd | 18c202413f1737d0c603ebc4eb566a4724b0dd37 | /media/linux/pds-sqlite3-queries/create-ministry-rosters.py | 4df8f9f7d381fa645b3fb85a57ae230a3b1e74af | [] | no_license | jsquyres/epiphany | 01f1a5dc2ec4ac21887f41ac7335376c154b9972 | 60e4a201b8186e63102b7649a3813e5ae7fa737a | refs/heads/main | 2023-08-31T04:25:08.520607 | 2021-08-27T15:02:11 | 2021-08-27T15:02:11 | 98,797,275 | 0 | 1 | null | 2021-09-26T01:18:29 | 2017-07-30T12:53:18 | Python | UTF-8 | Python | false | false | 14,410 | py | #!/usr/bin/env python3
#
# See https://openpyxl.readthedocs.io/en/stable/index.html
#
# pip3.6 install openpyxl
#
import sys
import os
import logging.handlers
import logging
# We assume that there is a "ecc-python-modules" sym link in this
# directory that points to the directory with ECC.py and friends.
moddir = os.path.join(os.getcwd(), 'ecc-python-modules')
if not os.path.exists(moddir):
    print("ERROR: Could not find the ecc-python-modules directory.")
    print("ERROR: Please make a ecc-python-modules sym link and run again.")
    exit(1)
# Make the helper modules importable before the project imports below.
sys.path.insert(0, moddir)
import ECC
import Google
import PDSChurch
import GoogleAuth
import googleapiclient
from datetime import datetime
from datetime import timedelta
from oauth2client import tools
from apiclient.http import MediaFileUpload
from openpyxl import Workbook
from openpyxl.styles import Font, PatternFill, Alignment
from pprint import pprint
from pprint import pformat
# Globals
gapp_id = 'client_id.json'
guser_cred_file = 'user-credentials.json'
ministries = [
{
"ministry" : '100-Parish Pastoral Council',
"gsheet_id" : '1aIoStpSOsup8XL5eNd8nhpJwM-IqN2gTkwVf_Qvlylc',
"birthday" : False,
},
{
"ministry" : '102-Finance Advisory Council',
"gsheet_id" : '1oGkjyLDexQyb-z53n2luFpE9vU7Gxv0rX6XirtxSjA0',
"birthday" : False,
},
{
"ministry" : '103-Worship Committee',
"gsheet_id" : '1h_ZvhkYlnebIu0Tk7h1ldJo-VKnJsJGe1jEzY34mcd0',
"birthday" : False,
},
{
"ministry" : '106-Community Life Committee',
"gsheet_id" : '1k_hH1tEWBGuERCmFvhZxKOfAsBkqy0uZ16LAd0_jMDg',
"birthday" : False,
},
{
"ministry" : '107-Social Resp Steering Comm',
"gsheet_id" : '1Am3v0Pv4D9zubkGYgFbUd8e92PZnBPrbcwKrMrs8AnI',
"birthday" : False,
},
{
"ministry" : '110-Ten Percent Committee',
"gsheet_id" : '18BIrnBWf_4LS9XeC9tordSD1SBgJz67a0I9Ouj6ZcEc',
"birthday" : False,
},
{
"ministry" : '207-Technology Committee',
"gsheet_id" : '1Gn2m2VMabPkWJWg_NTs6XeGPf_Qi7qPELLxyOx9Q0vU',
"birthday" : False,
},
{
"ministry" : '310-Adult Choir',
"gsheet_id" : '1ku8Aq9dXm_mrOq421MWVk7hAqV2Am5FFSgUACOYs2WU',
"birthday" : False,
},
{
"ministry" : '311-Bell Choir',
"gsheet_id" : '1UTzXgO9ZLBHB0w-zAW8-u57cgWLbkWWGanJgPC9gboE',
"birthday" : True,
},
{
"ministry" : '317-Instrumentalists & Cantors',
"gsheet_id" : '1YP3sC4dcOWH9Li1rJV8D5FI9mef50xvxqOf6K1K54_U',
"birthday" : True,
},
{
"ministry" : '318-Lectors MASTER LIST',
"gsheet_id" : '1X796X7_wFZmYoKMzGnj2BFFCOeoncIEILv1cmq_CJB8',
"birthday" : False,
},
{
"ministry" : '451-Livestream Team Ministry',
"gsheet_id" : '1Yku0IFuIKZCeUNGB5c_Ser_geYkylC2o1tiVfaNwkx8',
"birthday" : False,
},
{
"ministry" : '600-Men of Epiphany',
"gsheet_id" : '11LCDr-Vc3jyeKh5nrd49irscdvTv3TDXhpOoFWlohgs',
"birthday" : False,
},
{
"ministry" : '601-Sages (for 50 yrs. +)',
"gsheet_id" : '1-uvQO5RRf0K6NJlR_4Mijygn4XGk0zhvowdflKLoEUc',
"birthday" : False,
},
{
"ministry" : '700-Advocates for Common Good',
"gsheet_id" : '1Iz8hz7NAhh9-dVMiC7mL8yYFi_qmM_ayB5IXhJU0uPw',
"birthday" : False,
},
{
"ministry" : '710-Environmental Concerns',
"gsheet_id" : '1jsoRxugVwXi_T2IDq9J-mEVdzS8xaOk9kuXGAef-YaQ',
"birthday" : False,
},
{
"ministry" : '711-Hispanic Ministry Team',
"gsheet_id" : '1zUJLVRkzS79uVQYgMkA9YaUfSFrY4Wax0ys5jSfpkEg',
"birthday" : False,
},
{
"ministry" : '803-Youth Ministry AdultMentor',
"gsheet_id" : '1jzg9jRNUrjb9CeMRC23d4pkr2CQOUQNaOgL-EMDXOW4',
"birthday" : False,
},
{
"ministry" : '84-Youth Grp(Jr High)Adult Vol',
"gsheet_id" : '1b76OIhb9XDYg7llAHArg6lP9fCi9PrzJpcZcU8jjMBk',
"birthday" : False,
},
{
"ministry" : '88-Youth Grp(Sr Hi) Adult Vol',
"gsheet_id" : '14K4MaYEzPgHvnkf-Z1yFJPB-9YJSP1Ytg9rcou6ohyo',
"birthday" : False,
},
{
"ministry" : '91-Youth Council',
"gsheet_id" : '1zTGviZ6R3fus11maPl3GQZghCcvs1zC9oVSIV6uHXO4',
"birthday" : False,
},
]
####################################################################
def write_xlsx(members, ministry, want_birthday, log):
    """Write a ministry roster to a local .xlsx file and return its filename.

    One row-group per member: name, mailing address, phones/emails, and
    (optionally) birthday.  Unlisted phone numbers are skipped.  The file is
    named "<ministry> members as of <timestamp>.xlsx".
    """
    # Make the microseconds be 0, just for simplicity
    now = datetime.now()
    us = timedelta(microseconds=now.microsecond)
    now = now - us

    timestamp = ('{year:04}-{mon:02}-{day:02} {hour:02}:{min:02}'
                 .format(year=now.year, mon=now.month, day=now.day,
                         hour=now.hour, min=now.minute))
    filename = (f'{ministry} members as of {timestamp}.xlsx')

    # Put the members in a sortable form (they're currently sorted by MID).
    # 'Name' is "Last,First...", so sorting the keys sorts by last name.
    sorted_members = dict()
    for m in members:
        sorted_members[m['Name']] = m

    wb = Workbook()
    ws = wb.active

    # Title rows + set column widths
    title_font = Font(color='FFFF00')
    title_fill = PatternFill(fgColor='0000FF', fill_type='solid')
    title_align = Alignment(horizontal='center')

    last_col = 'C'
    if want_birthday:
        last_col = 'D'

    row = 1
    ws.merge_cells(f'A{row}:{last_col}{row}')
    cell = f'A{row}'
    ws[cell] = f'Ministry: {ministry}'
    ws[cell].fill = title_fill
    ws[cell].font = title_font

    row = row + 1
    ws.merge_cells(f'A{row}:{last_col}{row}')
    cell = f'A{row}'
    ws[cell] = f'Last updated: {now}'
    ws[cell].fill = title_fill
    ws[cell].font = title_font

    # Blank (but still colored) spacer row between the titles and the header
    row = row + 1
    ws.merge_cells(f'A{row}:{last_col}{row}')
    cell = f'A{row}'
    ws[cell] = ''
    ws[cell].fill = title_fill
    ws[cell].font = title_font

    row = row + 1
    columns = [(f'A{row}', 'Member name', 30),
               (f'B{row}', 'Address', 30),
               (f'C{row}', 'Phone / email', 50)]
    if want_birthday:
        columns.append((f'D{row}', 'Birthday', 30))

    for cell, value, width in columns:
        ws[cell] = value
        ws[cell].fill = title_fill
        ws[cell].font = title_font
        ws[cell].alignment = title_align
        ws.column_dimensions[cell[0]].width = width

    # Freeze everything above the first data row
    row = row + 1
    ws.freeze_panes = ws[f'A{row}']

    #---------------------------------------------------------------------

    def _append(row, col, value):
        """Write *value* at (row, col) and return the next row; skip blanks."""
        if value is None or len(value.strip()) == 0:
            return row
        _ = ws.cell(row=row, column=col, value=value)
        return row + 1

    # Data rows
    for name in sorted(sorted_members):
        m = sorted_members[name]

        # The name will take 1 row
        _ = ws.cell(row=row, column=1, value=m['email_name'])

        # The address will take multiple rows
        col = 2
        last_row = row
        f = m['family']
        last_row = _append(col=col, row=last_row, value=f['StreetAddress1'])
        last_row = _append(col=col, row=last_row, value=f['StreetAddress2'])
        val = '{cs}, {zip}'.format(cs=f['city_state'], zip=f['StreetZip'])
        last_row = _append(col=col, row=last_row, value=val)
        addr_last_row = last_row

        # The phone / email may be more than 1 row
        col = 3
        last_row = row
        key = 'phones'
        if key in m:
            for phone in m[key]:
                # Skip unlisted phone numbers
                if phone['unlisted']:
                    log.info("SKIPPED UNLISTED NUMBER FOR {n}".format(n=m['full_name']))
                    continue

                val = '{ph} {type}'.format(ph=phone['number'], type=phone['type'])
                last_row = _append(col=col, row=last_row, value=val)

        # If we have any preferred emails, list them all
        key = 'preferred_emails'
        if key in m and len(m[key]) > 0:
            for email in m[key]:
                last_row = _append(col=col, row=last_row, value=email['EMailAddress'])
            email_last_row = last_row
        # If we have no preferred emails, list the first alphabetic
        # non-preferred email
        else:
            key = 'non_preferred_emails'
            if key in m and len(m[key]) > 0:
                emails = sorted([x['EMailAddress'] for x in m[key]])
                last_row = _append(col=col, row=last_row,
                                   value=emails[0])
            email_last_row = last_row

        # The birthday will only be 1 row
        if want_birthday:
            col = 4
            key1 = 'MonthOfBirth'
            key2 = 'DayOfBirth'
            if key1 in m and key2 in m:
                birthday = '{m} {d}'.format(m=m[key1], d=m[key2])
                # Sometimes PDS has "None" in one of these two fields
                if 'None' not in birthday:
                    _append(col=col, row=row, value=birthday)

        # Between the address / phone+email, find the real last row
        last_row = max(email_last_row, addr_last_row)
        row = last_row + 1

    #---------------------------------------------------------------------

    wb.save(filename)
    # BUGFIX: previously logged the literal placeholder text instead of the
    # actual filename that was written.
    log.info(f'Wrote {filename}')

    return filename
#-------------------------------------------------------------------
def upload_overwrite(filename, google, file_id, log):
    """Upload *filename* over an existing Google Drive file.

    The local .xlsx is uploaded with the Google Sheet MIME type so Drive
    converts it in place, preserving the existing file ID (and therefore any
    links to it).  Exits the process on failure.
    """
    # BUGFIX: traceback is used in the error path below but was never
    # imported at module level, which turned any upload failure into a
    # NameError.  Import it locally so this function is self-contained.
    import traceback

    # Strip the trailing ".xlsx" off the Google Sheet name
    gsheet_name = filename
    if gsheet_name.endswith('.xlsx'):
        gsheet_name = gsheet_name[:-5]

    try:
        log.info('Uploading file update to Google file ID "{id}"'
                 .format(id=file_id))
        metadata = {
            'name' : gsheet_name,
            'mimeType' : Google.mime_types['sheet'],
            'supportsAllDrives' : True,
        }
        media = MediaFileUpload(filename,
                                mimetype=Google.mime_types['sheet'],
                                resumable=True)
        file = google.files().update(body=metadata,
                                     fileId=file_id,
                                     media_body=media,
                                     supportsAllDrives=True,
                                     fields='id').execute()
        # BUGFIX: the message previously had no {filename} placeholder even
        # though filename= was passed to .format().
        log.debug('Successfully updated file: "{filename}" (ID: {id})'
                  .format(filename=filename, id=file['id']))
    except Exception:
        # Narrowed from a bare "except:" so Ctrl-C still works.
        log.error('Google file update failed for some reason:')
        log.error(traceback.format_exc())
        exit(1)
#-------------------------------------------------------------------
def create_roster(pds_members, ministry, google, gsheet_id,
                  want_birthday, log):
    """Generate one ministry roster and upload it to its Google Sheet.

    Filters *pds_members* down to the given ministry, writes a temporary
    .xlsx via write_xlsx(), overwrites the Sheet identified by *gsheet_id*,
    and removes the temporary file.  Does nothing if the ministry is empty.
    """
    # BUGFIX: traceback is referenced in the cleanup error path but was
    # never imported at module level.
    import traceback

    # Find the members
    members = PDSChurch.filter_members_on_ministries(pds_members, [ministry])
    if members is None or len(members) == 0:
        log.info("No members in ministry: {min}".format(min=ministry))
        return

    # PDSChurch.filter_members() returns a dict.  Turn this into a simple
    # list of Members.
    members = [x for x in members.values()]

    # Make an xlsx
    filename = write_xlsx(members=members, ministry=ministry,
                          want_birthday=want_birthday, log=log)
    log.debug("Wrote temp XLSX file: {f}".format(f=filename))

    # Upload the xlsx to Google
    upload_overwrite(filename=filename, google=google, file_id=gsheet_id,
                     log=log)
    log.debug("Uploaded XLSX file to Google")

    # Remove the temp local XLSX file; a leftover temp file is not fatal,
    # so log and continue instead of raising.
    try:
        os.unlink(filename)
        log.debug("Unlinked temp XLSX file")
    except Exception:
        log.info("Failed to unlink temp XLSX file!")
        log.error(traceback.format_exc())
####################################################################
def setup_cli_args():
    """Define and parse the command-line arguments via oauth2client's parser.

    Returns the parsed argparse namespace.  Uses tools.argparser so that the
    Google OAuth flow's own flags are included automatically.
    """
    tools.argparser.add_argument('--logfile',
                                 help='Also save to a logfile')
    tools.argparser.add_argument('--debug',
                                 action='store_true',
                                 default=False,
                                 help='Be extra verbose')
    tools.argparser.add_argument('--sqlite3-db',
                                 required=True,
                                 help='Location of PDS sqlite3 database')

    # NOTE(review): these "global" declarations only *read* the module-level
    # defaults; nothing is assigned, so the statements are technically no-ops.
    global gapp_id
    tools.argparser.add_argument('--app-id',
                                 default=gapp_id,
                                 help='Filename containing Google application credentials')
    global guser_cred_file
    tools.argparser.add_argument('--user-credentials',
                                 default=guser_cred_file,
                                 help='Filename containing Google user credentials')

    args = tools.argparser.parse_args()

    return args
####################################################################
#
# Main
#
####################################################################
def main():
    """Load PDS data, authenticate to Google Drive, and regenerate every
    ministry roster listed in the module-level `ministries` table."""
    args = setup_cli_args()

    log = ECC.setup_logging(info=True,
                            debug=args.debug,
                            logfile=args.logfile)

    # Load families and members from the PDS sqlite3 export
    # (parishioners_only=False: include everyone, not just parishioners).
    log.info("Reading PDS data...")
    (pds, pds_families,
     pds_members) = PDSChurch.load_families_and_members(filename=args.sqlite3_db,
                                                        parishioners_only=False,
                                                        log=log)

    # OAuth into the Google Drive v3 API
    apis = {
        'drive' : { 'scope' : Google.scopes['drive'],
                    'api_name' : 'drive',
                    'api_version' : 'v3', },
    }
    services = GoogleAuth.service_oauth_login(apis,
                                              app_json=args.app_id,
                                              user_json=args.user_credentials,
                                              log=log)
    google = services['drive']

    # One roster (and one Google Sheet overwrite) per configured ministry
    for ministry in ministries:
        create_roster(pds_members=pds_members,
                      ministry=ministry['ministry'],
                      google=google,
                      gsheet_id=ministry['gsheet_id'],
                      want_birthday=ministry['birthday'],
                      log=log)

    # All done
    pds.connection.close()
if __name__ == '__main__':
main()
| [
"jeff@squyres.com"
] | jeff@squyres.com |
de35c3bd4ec2a2fe84243fd5123191d42f03790d | 7f939ccdc7068f1c441333296896274724100142 | /allocation/migrations/0008_auto_20170723_1409.py | 04f237178cf11730a622275047206a8e112c04ed | [] | no_license | happychallenge/vendorMgmt | c62584af3a7ab4538b36be2f46a5f23b83043495 | e703a04461151950a4b29e5ab2abb855205b21e6 | refs/heads/master | 2021-07-10T08:03:38.666214 | 2018-01-31T11:00:01 | 2018-01-31T11:00:01 | 96,496,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-23 14:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a uniqueness constraint so each
    # (supply, vendor) pair can appear at most once in SupplyVendor.

    dependencies = [
        ('products', '0015_auto_20170723_1352'),
        ('allocation', '0007_auto_20170723_0903'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='supplyvendor',
            unique_together=set([('supply', 'vendor')]),
        ),
    ]
| [
"happychallenge@outlook.com"
] | happychallenge@outlook.com |
837c22b02aaae92a95379ac2311f2991817f4eb1 | dcee93ce4b9fcf0a7ffa6ea658c403ed1fc84043 | /Meteor/src/logAnalysis/migrations/0003_auto_20170811_1009.py | 7bc9b2231774045650c9704355eb09a8a1d96d73 | [] | no_license | henryliuom/drv-study | 3eed96eef58138003371011034562a15ebc16b79 | dcab011bce0f34bcf50f8ab5601eb859a5a07cb7 | refs/heads/master | 2021-06-06T23:49:20.869907 | 2020-07-30T09:06:48 | 2020-07-30T09:06:48 | 95,858,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-11 02:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: renames the logAnalysis tables to use
    # the "meteor_" prefix (data is preserved; only the db_table changes).

    dependencies = [
        ('logAnalysis', '0002_auto_20170802_1329'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='loganalysis_agent',
            table='meteor_loganalysis_agent',
        ),
        migrations.AlterModelTable(
            name='loganalysis_area',
            table='meteor_loganalysis_area',
        ),
        migrations.AlterModelTable(
            name='loganalysis_cdn',
            table='meteor_loganalysis_cdn',
        ),
        migrations.AlterModelTable(
            name='loganalysis_http',
            table='meteor_loganalysis_http',
        ),
        migrations.AlterModelTable(
            name='loganalysis_status',
            table='meteor_loganalysis_status',
        ),
        migrations.AlterModelTable(
            name='loganalysis_upstream',
            table='meteor_loganalysis_upstream',
        ),
    ]
| [
"henry@techdog.com"
] | henry@techdog.com |
ec494d5bca832ae69c64b84ea0386e25b19fa3ae | a432e6b90912eaf68b6f352e38ef62852f16e4ac | /hf/spike_detection.py | 26b1a6572e9d11ea1d176a73718aad5fde3f26fd | [] | no_license | shaggy63/pltrading | 461e9c09a96681be8fd8d2ae96338c4c42f9171f | 627ce90f826acca0ae7048a90c512d0189be136c | refs/heads/master | 2022-04-14T02:10:53.863858 | 2020-03-27T17:25:46 | 2020-03-27T17:25:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,454 | py | from math import sqrt
from dateutil import parser
from configparser import ConfigParser
from pandas import Series
import matplotlib.dates as mdates
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
import time
from pandas import datetime
import platform
import pandas as pd
import pdb
import matplotlib.pyplot as plt
import matplotlib
if platform.platform() == "Darwin-18.7.0-x86_64-i386-64bit":
matplotlib.use("macOSX")
import matplotlib.dates as mdates
import numpy as np
import time
import json
import collections
from pyod.models.hbos import HBOS
import math
import numpy as np
import pandas
from datetime import datetime, timedelta
import os
import numpy as np
import pymannkendall as mk
# --- Global configuration for the spike-detection backtester ---
pd.set_option("display.precision", 9)
pd.set_option('display.max_rows', 40000)
pd.options.mode.chained_assignment = None

# 1 = scan all symbols, 2 = replay saved spike patterns, 3 = fixed symbol list
backtest_mode = 1
datatype = "ticker3"
base_path = "/home/canercak/Desktop/dev/pltrading"
#base_path = "/home/canercak_gmail_com/pltrading"
if platform.platform() == "Darwin-18.7.0-x86_64-i386-64bit":
    base_path = "/Users/apple/Desktop/dev/pltrading"
path = base_path +"/data/"+datatype+"/"

transaction_fee = 0.00125  # per-side fee; applied twice (buy + sell)
initial_balance = 100
BUY, SELL, HOLD = 0, 1, 2  # action codes used throughout do_backtest()
results = {}
# Per-strategy mutable state; do_backtest() reads/writes these dicts in place.
conditions = [
    {'name': 'MCO PATTERN', 'entry_price': 0, 'action': HOLD, 'trade_count': 0, 'balance': initial_balance, 'buy_mode': True}
    #{'name': 'detect spike1', 'entry_price': 0, 'action': HOLD, 'trade_count': 0, 'balance': initial_balance, 'buy_mode': True}
]
# Symbols skipped during scanning (and deletable via delete_symbols()).
EXCLUDE_SYMBOLS = ["SCBTC","NCASHBTC","ONEBTC","DOGEBTC","POEBTC","MFTBTC","DREPBTC","COCOSBTC","IOTXBTC","SNGLSBTC","ERDBTC","QKCBTC","TNBBTC","CELRBTC","TUSDBTC","ANKRBTC","HOTBTC","WPRBTC","QSPBTC","SNMBTC","HSRBTC","VENBTC","MITHBTC","CNDBTC","BCCBTC","DOCKBTC","DENTBTC","FUELBTC","BTCBBTC","SALTBTC","KEYBTC","SUBBTC","TCTBTC","CDTBTC","IOSTBTC","TRIGBTC","VETBTC","TROYBTC","NPXSBTC","BTTBTC","SCBBTC","WINBTC","RPXBTC","MODBTC","WINGSBTC","BCNBTC","PHXBTC","XVGBTC","FTMBTC","PAXBTC","ICNBTC","ZILBTC","CLOAKBTC","DNTBTC","TFUELBTC","PHBBTC","CHATBTC","STORMBTC"]
def add_features(df):
    """Name the raw ticker columns and add rolling-mean features.

    Adds simple moving averages of the quoted-asset volume (qav_sma*) and of
    the last price (last_sma*) for the window sizes the strategy reads.
    """
    frame = DataFrame(df)
    frame.columns = ['symbol','date','price_change','price_change_percent','last_price','best_bid_price','best_ask_price','total_traded_base_asset_volume','total_traded_quote_asset_volume']
    for window in (50, 100, 200, 400, 500, 1000):
        column = 'qav_sma{}'.format(window)
        frame[column] = frame.total_traded_quote_asset_volume.rolling(window).mean()
    for window in (50, 100, 200, 400, 600, 1000):
        column = 'last_sma{}'.format(window)
        frame[column] = frame.last_price.rolling(window).mean()
    return frame
def plot_symbols():
    """Plot price/volume charts for a hard-coded symbol list.

    The directory scan below is currently disabled (commented out), so only
    the symbols listed in SYMBOLS are plotted.
    """
    dir = os.listdir(path)
    SYMBOLS = ["GNTBTC"]
    # for sym in dir:
    #     if ".py" not in sym and ".DS_Store" not in sym and sym.split('.csv')[0] not in EXCLUDE_SYMBOLS:
    #         SYMBOLS.append(sym.split(".csv")[0])
    for symbol in SYMBOLS:
        df = read_csv(path+symbol+".csv")
        df = add_features(df)
        plot_whole(df)
def delete_symbols():
    """Delete the cached CSV file of every excluded symbol.

    Failures (e.g. the file does not exist) are reported and skipped so the
    rest of the cleanup still runs.
    """
    for symbol in EXCLUDE_SYMBOLS:
        csv_file = path + symbol + ".csv"
        try:
            os.remove(csv_file)
        except OSError as err:
            # BUGFIX: was a bare "except:" that printed the meaningless
            # string "ss"; report which file failed and why instead.
            print("could not remove " + csv_file + ": " + str(err))
def backtest():
    """Dispatch on the global backtest_mode and run do_backtest() per symbol.

    Mode 1: every CSV in `path` except EXCLUDE_SYMBOLS.
    Mode 2: replay saved spike patterns from spike_patterns.json.
    Mode 3: a hard-coded symbol list (debugging).
    """
    if backtest_mode == 1:
        SYMBOLS = []
        dir = os.listdir(path)
        for sym in dir:
            if ".py" not in sym and ".DS_Store" not in sym and sym.split('.csv')[0] not in EXCLUDE_SYMBOLS:
                SYMBOLS.append(sym.split(".csv")[0])
        for isx, symbol in enumerate(SYMBOLS):
            print(str(isx)+" of "+ str(len(SYMBOLS))+ " symbols")
            df = read_csv(path+symbol+".csv")
            df = add_features(df)
            do_backtest(df,symbol)
    elif backtest_mode == 2:
        # NOTE: mode 2 uses absolute macOS paths, not base_path — it only
        # works on the author's laptop as written.
        with open('/Users/apple/Desktop/dev/pltrading/hf/patterns/spike_patterns.json') as json_file:
            patterns = json.load(json_file)
        for pattern in patterns:
            df = pd.read_csv("/Users/apple/Desktop/dev/pltrading/data/" + pattern['data'] +"/"+pattern['symbol']+".csv")
            df = add_features(df)
            df_x = df
            do_backtest(df,pattern['symbol'],pattern['end'])
            #plot_whole(df_x)
    elif backtest_mode == 3:
        #SYMBOLS = ["VIABTC","VITEBTC","STEEMBTC","SYSBTC","GRSBTC","WRXBTC"] #"RDNBTC"]#,"NXSBTC","RDNBTC",
        SYMBOLS = ["ASTBTC"]#["NXSBTC"]#["VITEBTC"]
        for symbol in SYMBOLS:
            df = read_csv(path+symbol+".csv")
            df = add_features(df)
            #plot_whole(df)
            do_backtest(df,symbol)
def do_backtest(df,symbol,end=None):
    """Run the MCO-pattern spike strategy over one symbol's ticker frame.

    Walks the frame tick by tick; when the volume SMAs show a sustained,
    accelerating rise, it runs anomaly detection over the trailing window
    and buys if the outlier/trend gates pass.  Sells when last_sma600 turns
    down.  Trade state lives in the module-level `conditions` dicts.

    `end` is only used in backtest_mode 2 to center the replayed window.
    """
    trade_count = 0
    trade_history = []
    balance = initial_balance
    win_count = 0
    loss_count = 0
    profit = 0
    action = HOLD
    current_tick = 0
    entry_tick = 0
    buy_mode = True
    entry_price = 0
    buy_index = 0
    window_size = 1000   # trailing lookback fed to detect_anomaly()
    last_size = 20       # tail of the window where outliers are allowed

    if backtest_mode==2:
        # Replay: slice around the recorded pattern end index
        df = df.iloc[end - window_size*1-100:end+window_size*2]
    elif backtest_mode==3:
        # Debug: hard-coded slice of interest
        df_x = df
        df = df.iloc[161385:165385]
        # fragment = detect_anomaly(df)
        #detect_anomaly(df.iloc[11706:11074])
        #plot_whole(df_x)
        # pdb.set_trace()

    df = df.reset_index()
    df = df.fillna(0)
    for i, row in df.iterrows():
        start_time = time.time()
        current_price = row['last_price']
        current_ask_price = row['best_ask_price']
        current_bid_price = row['best_bid_price']
        current_tick += 1
        if i > window_size:
            last = df.iloc[i,:]
            prev1 = df.iloc[i-2,:]
            prev25 = df.iloc[i-25,:]
            prev50 = df.iloc[i-50,:]
            prev100 = df.iloc[i-100,:]
            prev200 = df.iloc[i-200,:]
            # Spread between the 500- and 1000-tick volume SMAs, now and at
            # three lookbacks; the entry gate wants this spread widening.
            diffx1 = last.qav_sma500 - last.qav_sma1000
            diffx2 = prev50.qav_sma500 - prev50.qav_sma1000
            diffx3 = prev100.qav_sma500 - prev100.qav_sma1000
            diffx4 = prev200.qav_sma500 - prev200.qav_sma1000
            # Cheap pre-filter before the expensive anomaly pass below
            first_check = (
                last.qav_sma500 > last.qav_sma1000 and
                prev50.qav_sma500 > prev50.qav_sma1000 and
                prev100.qav_sma500 > prev100.qav_sma1000 and
                prev200.qav_sma500 > prev200.qav_sma1000 and
                last.qav_sma500 > prev50.qav_sma500 > prev100.qav_sma500 > prev200.qav_sma500 and
                diffx1 > diffx2 > diffx3 > diffx4 and
                diffx1 > 0.05
            )
            # if last['index'] == 114395:
            #     pdb.set_trace()
            if (first_check == True and conditions[0]['buy_mode'] == True):
                # Label volume outliers in the trailing window and derive the
                # buy gate from where the outliers sit and how they trend.
                fragment = df.iloc[i-window_size:i,:]
                fragment = detect_anomaly(fragment)
                fragment = fragment.reset_index()
                last = fragment.iloc[-1,:]
                prev1 = fragment.iloc[-2,:]
                first_n = fragment[:window_size-last_size]
                last_n = fragment[-last_size:]
                # Mann-Kendall trend test over the window's volume changes
                mk_test = mk.original_test(fragment.change_qav.to_numpy())
                fragment_sum = fragment.groupby(['score_qav', 'label_qav'], as_index=False, sort=False)[[ "change_qav", "change_price"]].sum()
                # Buy only when: no outliers before the tail, tail outliers'
                # summed volume change is moderate (5..15, each in 1..10) and
                # monotone, the trend is rising (z > 1), and price change over
                # the outlier groups is net positive.
                conditions[0]['buy_cond'] =(
                    (first_n.label_qav == 0).all() and
                    fragment_sum[fragment_sum['label_qav'] == 1].change_qav.sum() > 5 and
                    fragment_sum[fragment_sum['label_qav'] == 1].change_qav.sum() < 15 and
                    (fragment_sum[fragment_sum['label_qav'] == 1].change_qav >1).all() and
                    (fragment_sum[fragment_sum['label_qav'] == 1].change_qav <10).all() and
                    (
                        (fragment_sum[fragment_sum['label_qav'] == 1].change_qav.is_monotonic_increasing) or
                        (fragment_sum[fragment_sum['label_qav'] == 1].change_qav.is_monotonic_decreasing and (fragment[fragment['label_qav'] == 1].change_qav > 0).all())
                    ) and
                    mk_test.z > 1 and
                    len(fragment_sum) >= 3 and
                    fragment_sum.label_qav.iloc[0] == 0 and
                    fragment_sum.label_qav.iloc[-1] == 1 and
                    fragment_sum.label_qav.iloc[-2] == 1 and
                    fragment_sum.iloc[-1].change_price + fragment_sum.iloc[-2].change_price > 0 and
                    fragment_sum.change_price.sum() > 0.5
                )
            elif (conditions[0]['buy_mode'] == False):
                # In a position: exit as soon as the slow price SMA turns down
                conditions[0]['sell_cond'] = (last['last_sma600'] < prev1['last_sma600'])
            else:
                continue

            for ic, cond in enumerate(conditions):
                if cond['buy_mode'] and cond['buy_cond']:
                    # Enter at the ask
                    conditions[ic]['action'] = BUY
                    conditions[ic]['entry_price'] = current_ask_price
                    conditions[ic]['buy_mode'] = False
                    if ic ==0:
                        printLog("CONDITION " + str(ic+1) +" IS BUYING....")
                        printLog("##### TRADE " + str(cond['trade_count']) + " #####")
                        printLog("BUY: " +symbol+" for "+ str(cond['entry_price']) + " at " + str(last.date) + " - index: " + str(last['index']))
                        printLog(fragment[['index','date','symbol','last_price', 'total_traded_quote_asset_volume', 'label_qav', 'score_qav','change_qav','change_price']].tail(100))
                        printLog(mk.original_test(fragment.change_qav.to_numpy()))
                        printLog(fragment_sum)
                        printLog("diffx1: " + str(diffx1))
                        printLog("last.qav_sma500: " + str(last.qav_sma500))
                        printLog("last.qav_sma1000: " + str(last.qav_sma1000))
                        printLog("prev100.qav_sma500: " + str(prev100.qav_sma500))
                        printLog("prev100.qav_sma1000: " + str(prev100.qav_sma1000))
                        #pdb.set_trace()
                elif not cond['buy_mode'] and cond['sell_cond']:
                    # Exit at the bid; fee applied on both legs
                    printLog("CONDITION " + str(ic+1) +" IS SELLING....")
                    conditions[ic]['action'] = SELL
                    exit_price = current_bid_price
                    profit = ((exit_price - cond['entry_price'])/cond['entry_price'] + 1)*(1-transaction_fee)**2 - 1
                    conditions[ic]['balance'] = conditions[ic]['balance'] * (1.0 + profit)
                    conditions[ic]['trade_count'] += 1
                    conditions[ic]['buy_mode'] = True
                    printLog("SELL: " + symbol+" for "+ str(exit_price) + " at " + str(last.date) + " - index: " + str(last['index']))
                    printLog("PROFIT: " + str(profit*100))
                    printLog("BALANCE: " + str(cond['balance']))
                else:
                    conditions[ic]['action'] = HOLD
        if (current_tick > len(df)-1):
            # Last tick: dump per-condition totals
            printLog("*********TOTAL RESULTS*************************")
            for ic, cond in enumerate(conditions):
                printLog("SYMBOL: "+ symbol)
                printLog("CONDITION NUMBER: "+ str(ic))
                printLog("TOTAL BALANCE: "+ str(cond['balance']))
                printLog("TRADE COUNT: "+ str(cond['trade_count']))
            printLog("**********************************")
        if i % 1000 == 0:
            printLog(symbol+"-"+str(row['index']))
def plot_buy_sell(trade_history):
    """Plot the traded price series with buy (green ^) and sell (red v) marks.

    Each entry of trade_history is (action, tick, price) with action 0=buy,
    1=sell.  Blocks until the plot window is closed.
    """
    ticks = [entry[1] for entry in trade_history]
    prices = [entry[2] for entry in trade_history]

    buys = [(entry[1], entry[2]) for entry in trade_history if entry[0] == 0]
    sells = [(entry[1], entry[2]) for entry in trade_history if entry[0] == 1]
    buy_tick = np.array([t for t, _ in buys])
    buy_price = np.array([p for _, p in buys])
    sell_tick = np.array([t for t, _ in sells])
    sell_price = np.array([p for _, p in sells])

    plt.plot(ticks, prices)
    plt.scatter(buy_tick, buy_price, c='g', marker="^", s=50)
    plt.scatter(sell_tick, sell_price, c='r', marker="v", s=50)
    plt.show(block=True)
def detect_trend(df):
    # NOTE(review): this function is currently broken/dead code — it calls
    # seasonal_decompose (statsmodels) and go.Scatter (plotly), neither of
    # which is imported anywhere in this file, so calling it raises
    # NameError.  The built traces are also never passed to a figure.
    decomposition = seasonal_decompose(df.total_traded_quote_asset_volume, model='multiplicative', extrapolate_trend='freq', period=100)
    matplotlib.rcParams['figure.figsize'] = [9.0,5.0]
    fig = decomposition.plot()
    trace1 = go.Scatter(
        x = df.date,y = decomposition.trend,
        name = 'Trend'
    )
    trace2 = go.Scatter(
        x = df.date,y = decomposition.seasonal,
        name = 'Seasonal'
    )
    trace3 = go.Scatter(
        x = df.date,y = decomposition.resid,
        name = 'Residual'
    )
    trace4 = go.Scatter(
        x = df.date,y = df.total_traded_quote_asset_volume,
        name = 'Mean Stock Value'
    )
    plt.show()
def trendline(data, order=1):
    """Return the slope of a degree-`order` polynomial fit of *data*.

    *data* is a pandas Series; the fit is over its index positions.
    """
    fitted = np.polyfit(data.index.values, list(data), order)
    # coefficient of the linear term, i.e. the trend slope
    return float(fitted[-2])
def plot_whole(df):
    """Plot volume (with SMAs) and price (with SMAs) in two stacked axes.

    Blocks until the plot window is closed.
    """
    plt.clf()
    fig, axes = plt.subplots(nrows=2, ncols=1)
    # Top axis: quote-asset volume plus its 500/1000 SMAs
    df.total_traded_quote_asset_volume.plot(ax=axes[0] , color="blue", style='.-')
    # df.qav_sma50.plot(ax=axes[0], color="red")
    # df.qav_sma100.plot(ax=axes[0], color="orange")
    df.qav_sma500.plot(ax=axes[0], color="purple")
    df.qav_sma1000.plot(ax=axes[0], color="brown")
    # Bottom axis: last price plus its 100/200/600/1000 SMAs
    df.last_price.plot(ax=axes[1], style='.-')
    df.last_sma100.plot(ax=axes[1], color="yellow")
    df.last_sma200.plot(ax=axes[1], color="purple")
    df.last_sma600.plot(ax=axes[1], color="black")
    df.last_sma1000.plot(ax=axes[1], color="green")
    plt.title(df.iloc[-1].symbol)
    plt.show()
def plot_trades(df):
    # NOTE(review): broken debugging helper — it drops straight into the
    # debugger and references `trade_history`, which is not defined in this
    # scope (it is a local of do_backtest).  Do not call in production runs.
    pdb.set_trace()
    df.plot(x='close', y='mark',style='.-',linestyle='-', marker='o', markerfacecolor='black')
    trade_history.plot(x='action', y='current_price',linestyle='-', marker='o', markerfacecolor='black', plot_data_points= True)
def detect_anomaly(df):
    """Flag quote-volume outliers with HBOS and add percent-change columns.

    Adds to the returned frame:
      label_qav    — 1 for volume outliers, 0 otherwise (HBOS prediction)
      score_qav    — HBOS decision score per row
      change_qav   — one-step percent change of quote volume
      change_price — one-step percent change of last price
    """
    df = df.fillna(0)
    clf = HBOS()
    y_values = df.total_traded_quote_asset_volume.values.reshape(-1, 1)
    clf.fit(y_values)
    # BUGFIX/cleanup: a first clf.predict() call whose result was discarded
    # has been removed (it doubled the prediction work for nothing), along
    # with the unused x_values array.
    df["label_qav"] = clf.predict(y_values)
    df["score_qav"] = clf.decision_function(y_values)
    df['change_qav'] = df.total_traded_quote_asset_volume.pct_change(periods=1) * 100
    df['change_price'] = df.last_price.pct_change(periods=1) * 100
    return df
def plot_four_subplots():
    # NOTE(review): broken — `df` is not a parameter and is not defined at
    # module level, so calling this raises NameError as written.
    fig, axes = plt.subplots(nrows=2, ncols=2)
    df.total_traded_quote_asset_volume.plot(ax=axes[0,0])
    df.total_traded_base_asset_volume.plot(ax=axes[0,1])
    df.last_price.plot(ax=axes[1,0])
    df.price_change_percent.plot(ax=axes[1,1])
def search_sequence_numpy(arr, seq):
    """Find every index covered by an occurrence of *seq* inside *arr*.

    Returns an ndarray of all positions that belong to at least one match,
    or an empty Python list when *seq* never occurs.
    """
    n_arr, n_seq = arr.size, seq.size

    # Compare every length-n_seq window of arr against seq in one shot.
    offsets = np.arange(n_seq)
    starts = np.arange(n_arr - n_seq + 1)[:, None]
    window_hits = (arr[starts + offsets] == seq).all(1)

    if window_hits.any() > 0:
        # Expand each matching start into the full run of covered indices.
        coverage = np.convolve(window_hits, np.ones(n_seq, dtype=int))
        return np.where(coverage > 0)[0]
    return []  # No match found
def pct_change(first, second):
    """Percent change from *first* to *second* (signed).

    Returns 0 when the values are equal and +inf when *first* is 0.
    """
    delta = second - first
    try:
        if delta > 0:
            return (delta / first) * 100
        if delta < 0:
            return -(((first - second) / first) * 100)
    except ZeroDivisionError:
        return float('inf')
    return 0
def print_df(df):
    """Print *df* without pandas' row truncation (all rows shown)."""
    with pd.option_context('display.max_rows', None):
        print(df)
def printLog(*args, **kwargs):
    """print() to stdout and append the same line to the mode's log file."""
    print(*args, **kwargs)
    log_file = base_path + '/logs/testmode_' + str(backtest_mode) + ".txt"
    with open(log_file, 'a') as handle:
        print(*args, **kwargs, file=handle)
if __name__ == '__main__':
    # Entry point: run the backtest for the configured backtest_mode.
    #$plot_symbols()
    backtest()
| [
"canercak@gmail.com"
] | canercak@gmail.com |
f58f43a4575f64887bb87422cc4d237f32db62bc | 282ec49f8ce8aa176c24e4f13a8852c9b0752e4a | /forgery/simple-manager/gui/main_window.py | fd9291d8160c763d132090a7548b411a28a98454 | [] | no_license | montreal91/workshop | b118b9358094f91defdae1d11ff8a1553d67cee6 | 8c05e15417e99d7236744fe9f960f4d6b09e4e31 | refs/heads/master | 2023-05-22T00:26:09.170584 | 2023-01-28T12:41:08 | 2023-01-28T12:41:08 | 40,283,198 | 3 | 1 | null | 2023-05-01T20:19:11 | 2015-08-06T03:53:44 | C++ | UTF-8 | Python | false | false | 1,537 | py | from random import shuffle
# from PySide.QtCore import Slot
from PySide.QtGui import QWidget
from gui.widgets.main_window_ui import Ui_JMainWindow
from club import JClub
from league import JLeague
# Fixed roster of club names; the first entry becomes the playable club.
club_names = [
    "Canberra Masters",
    "Sydney Kangaroos",
    "Dandenong Pianists",
    "Melbourne Slams",
    "Melbourne Rockets",
    "Darwin Genes",
    "Kingston Whales",
    "Brisbane Rangers",
    "Adelaide Thrashers",
    "Perth Penguins"
]
class JMainWindow(QWidget):
    """Main application window; owns the league once one is created."""

    def __init__(self):
        super(JMainWindow, self).__init__()
        self.widget = Ui_JMainWindow()
        self.widget.setupUi(self)
        self._league = None

    def CreateLeague(self, param_dict):
        """Create a league from the dialog parameters and populate it.

        Builds one club per entry in club_names (only the first club is
        playable), deals 4 randomly shuffled players to each club, and
        generates the schedule.
        """
        self._league = JLeague(
            days=param_dict["days"],
            divisions=param_dict["divs"],
            indiv_matches=param_dict["in_div_games"],
            exdiv_matches=param_dict["out_div_games"]
        )
        players_per_club = 4
        player_pool = [n + 1 for n in range(len(club_names) * players_per_club)]
        shuffle(player_pool)
        for index, club_name in enumerate(club_names):
            club_id = index + 1
            club = JClub(club_id=club_id, name=club_name, playable=(club_id == 1))
            for _ in range(players_per_club):
                club.AddPlayer(player_pool.pop())
            self._league.AddClub(club)
        self._league.CreateSchedule()
| [
"nefedov.alexander91@yandex.ru"
] | nefedov.alexander91@yandex.ru |
655f6bdc4c7f5df0fd7c9a5464f13dfc8f420f3c | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /checkov/terraform/checks/resource/aws/SSMDocumentsArePrivate.py | 60963dea36c96ee18408e724b9898d385cb74fa2 | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 1,120 | py |
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck
class SSMDocumentsArePrivate(BaseResourceNegativeValueCheck):
    def __init__(self):
        """Checkov check: an aws_ssm_document must not be shared publicly.

        Related controls:
        NIST.800-53.r5 AC-21, NIST.800-53.r5 AC-3, NIST.800-53.r5 AC-3(7), NIST.800-53.r5 AC-4, NIST.800-53.r5 AC-4(21),
        NIST.800-53.r5 AC-6, NIST.800-53.r5 SC-7, NIST.800-53.r5 SC-7(11), NIST.800-53.r5 SC-7(16),
        NIST.800-53.r5 SC-7(20), NIST.800-53.r5 SC-7(21), NIST.800-53.r5 SC-7(3), NIST.800-53.r5 SC-7(4),
        NIST.800-53.r5 SC-7(9)
        """
        super().__init__(
            name="Ensure SSM documents are not Public",
            id="CKV_AWS_303",
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_resources=["aws_ssm_document"],
        )

    def get_inspected_key(self) -> str:
        # Fails when the document is shared with account_ids = "All"
        return "permissions/[0]/account_ids"

    def get_forbidden_values(self) -> str:
        return "All"


check = SSMDocumentsArePrivate()
| [
"noreply@github.com"
] | bridgecrewio.noreply@github.com |
37d4789329c8c3bf49c5bf0697aeb0465cc52a8f | b0c8e0cafa4a8916faab3cce65756ae91426c43f | /study/Python/VCWeek6/BOJ_14677_강의현.py | a37da96e2703bab006f6f7073b11ede71c4291ce | [] | no_license | Rurril/IT-DA-3rd | b3e3ec3c2a5efbc75b76b84e9002c27a0ba4a1c4 | 9985e237cb1b90e9609656d534e0ed164723e281 | refs/heads/master | 2022-07-22T15:26:39.085369 | 2021-11-23T13:30:06 | 2021-11-23T13:30:06 | 288,980,334 | 3 | 29 | null | 2020-11-05T10:25:30 | 2020-08-20T10:49:17 | Java | UTF-8 | Python | false | false | 971 | py | # 병약한 윤호 - G5
import sys
from collections import deque
def bfs():
    """Level-order BFS over (taken-from-left, taken-from-right) pill states.

    Each BFS level consumes one meal slot in B, L, D rotation; `day` counts
    how many slots could be filled.  Relies on the module-level globals set
    in the __main__ block: T (total pills), pill (pill letters), BLD (meal
    order), dp (visited table).  Returns the answer printed by the driver.
    """
    que=deque()
    que.append((0,0))
    day=-1
    order=0
    while que:
        size=len(que)
        # Expand exactly the states of the current level (current meal slot)
        while True:
            if size==0:
                break
            left, right=que.popleft()
            if left+right<T:
                # Take the next pill from the left end if it matches this meal
                if pill[left]==BLD[order] and dp[left+1][right]==0:
                    que.append((left+1,right))
                    dp[left+1][right]=1
                # ...or from the right end
                if pill[T-1-right]==BLD[order] and dp[left][right+1]==0:
                    que.append((left, right+1))
                    dp[left][right+1]=1
            size-=1
        order=(order+1)%3
        day+=1
    return day
if __name__=="__main__":
    # Read N (days) and the pill string; globals below are used by bfs().
    N=int(sys.stdin.readline())
    T=3*N
    BLD=['B','L','D']
    pill=list(sys.stdin.readline().rstrip())
    dp=[[0 for _ in range(T+1)] for _ in range(T+1)]
    dp[0][0]=1
    print(bfs())
"riverkeh@naver.com"
] | riverkeh@naver.com |
a5260f4c75d1782d5851bebe940fca53f07fce4d | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2751486_0/Python/viswabharathi/prob1.py | e936ba3450edac54600bc6b01b6579200d489771 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | t = int(raw_input())
for i in xrange(t):
name, n = raw_input().split()
n = int(n)
l = len(name)
tmp = 0
res = 0
start = 0
cstart = 0
for j, ch in enumerate(name):
if ch not in ('a', 'e', 'i', 'o', 'u'):
tmp += 1
else:
tmp = 0
cstart = j+1
if tmp >= n:
if cstart - start > 0:
#print " test ", cstart, start, j , l
res += ( (cstart - start + 1 ) * (l - j ))
else:
res += (l - j)
#print name[start:], res, cstart
cstart += 1
start = cstart
print "Case #%s: %s"% (i+1, res)
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
a54650b300fda35bc32be1b5af773c9e8c81720b | 181af10fcf40b824fe92d3b8f72fd15d6d1490c2 | /Contests/201-300/week 202/1552. Magnetic Force Between Two Balls/Magnetic Force Between Two Balls.py | 9e1e6d05fc8941b65b0940ab4af14bb1f0f32a08 | [] | no_license | wangyendt/LeetCode | 402c59a0b7b7f5b3a672231ea5dad8056ade36af | 4a3ba15284c45b2d8bf38306c8c8526ae174615c | refs/heads/master | 2023-08-10T06:27:54.995152 | 2023-08-10T02:22:27 | 2023-08-10T02:22:27 | 176,651,399 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Wayne
@contact: wangye.hope@gmail.com
@software: PyCharm
@file: Magnetic Force Between Two Balls
@time: 2020/08/17 00:23
"""
class Solution:
    """LeetCode 1552: place m balls in baskets so the minimum pairwise gap is maximal."""

    def maxDistance(self, position: list, m: int) -> int:
        """Return the largest achievable minimum distance between any two balls.

        Sorts ``position`` in place, then binary-searches the answer space
        [0, max(position) - min(position)] for the biggest feasible gap.
        """
        position.sort()
        total = len(position)

        def can_place(gap: int) -> bool:
            # Greedy check: drop balls left to right, keeping them >= gap apart.
            placed = 1
            last = position[0]
            for idx in range(1, total):
                if position[idx] - last >= gap:
                    placed += 1
                    last = position[idx]
            return placed >= m

        lo, hi = 0, position[-1] - position[0]
        while lo < hi:
            mid = hi - (hi - lo) // 2   # upper midpoint so the loop always shrinks
            if can_place(mid):
                lo = mid
            else:
                hi = mid - 1
        return lo
| [
"905317742@qq.com"
] | 905317742@qq.com |
8aa97fb643114e4e113b2ed33a8a305fcf061797 | 5ab2ccf70fddd30ea88155f2a5adb0711bf3dc9a | /Chap7/badfloatcheck.py | 4c52a6f76ac19e3eefba5d51fb650c03b2210a82 | [] | no_license | jdukosse/LOI_Python_course-SourceCode | 32d66fd79344e9ab9412a6da373f2093b39cad92 | bf13907dacf5b6e95f84885896c8f478dd208011 | refs/heads/master | 2020-12-05T23:27:53.862508 | 2020-01-24T13:42:28 | 2020-01-24T13:42:28 | 232,276,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | def main():
# Count to ten by tenths
i = 0.0
while i != 1.0:
print("i =", i)
i += 0.1
main()
| [
"jdukosse@hotmail.com"
] | jdukosse@hotmail.com |
4ace3d37196da36960499f3b695393f79a07f88b | 1d69ab8bc1ae1cb7f576f11eafbd72f9b6032d76 | /one_off/watertest.py.bak | 354b7a363d8581560e1b9c143d8c5b9c46435234 | [] | no_license | FDI-IT/fd | d40271a8ba51908914bfefc7574d86e6dbdb0d90 | 862ec4bdf8da94a29856c11f35b5202af73017b7 | refs/heads/python2 | 2022-12-03T17:27:05.767487 | 2020-07-15T19:01:41 | 2020-07-15T19:01:41 | 221,480,272 | 0 | 0 | null | 2022-11-22T02:11:00 | 2019-11-13T14:39:10 | Python | UTF-8 | Python | false | false | 924 | bak | from haccp.models import WaterTest
from datetime import date
import random
from decimal import Decimal
# Sampling schedule: one batch of tests every 7 days starting 2009-01-07.
start_date = date(2009,1,7)
delta = date(2009,1,14) - start_date
# Dates on which no tests were taken; when the weekly walk lands on one of
# these it skips ahead a second week.
# NOTE(review): these all fall between Christmas and New Year - presumably
# plant shutdown weeks; confirm.
vacation_dates = (date(2009,12,30),date(2010,12,29),date(2011,12,28),date(2012,12,26))
def populate_results():
    # Rebuild the WaterTest table with synthetic data: for each weekly test
    # date from start_date until today, record 4 tests with random results
    # in 0.1 .. 0.6, cycling the zone counter through 1..18 across dates.
    WaterTest.objects.all().delete()
    test_date = start_date
    zone = 1
    while(test_date < date.today()):
        for x in range(1,5):
            # randrange(1, 7, 1) yields 1..6, so results land on 0.1 .. 0.6.
            test_result = Decimal(random.randrange(1,7,1))/10
            wt = WaterTest(test_date=test_date,
                           zone=zone,
                           test_result=test_result)
            wt.save()
            zone += 1
            if zone > 18:
                zone = 1
        test_date += delta
        if test_date in vacation_dates:
            print test_date
            test_date += delta   # skip the vacation week entirely
| [
"doofus1102@gmail.com"
] | doofus1102@gmail.com |
af80e7eaf4c229b7185cc7b60832b6ea14b89afb | c8036cb365243439b4a3593124eafdfba933a034 | /src/pytorch-resnext-32x8d-centercrop-epoch10.py | 14ecbd8f8981e85f25eafa970a134b791220f81a | [] | no_license | koike-ya/rsna | 3a1150dc878bde6320ae4c1d965675460dd7de0d | c88c45cfa280b47f0fb48cc9df88954f83a551b4 | refs/heads/master | 2022-03-16T00:36:55.846905 | 2019-11-02T00:49:15 | 2019-11-02T00:49:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,340 | py |
# Input layout (Kaggle-style): stage-1 CSVs live next to pre-exported PNG
# versions of the train/test DICOM images.
dir_csv = '../input/'
dir_train_img = '../input/stage_1_train_pngs/'
dir_test_img = '../input/stage_1_test_pngs/'
# In[2]:
# Parameters
n_classes = 6     # one output per hemorrhage subtype plus the 'any' flag
n_epochs = 10
batch_size = 32
# # Setup
#
# Need to grab a couple of extra libraries
#
# - Nvidia Apex for mixed precision training (https://github.com/NVIDIA/apex)
# In[3]:
# Installing useful libraries
# !git clone https://github.com/NVIDIA/apex && cd apex && pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
# In[4]:
# Libraries
import glob
import os
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import pydicom
import torch
import torch.optim as optim
from albumentations import Compose, ShiftScaleRotate, CenterCrop, HorizontalFlip, RandomBrightnessContrast
from albumentations.pytorch import ToTensor
from matplotlib import pyplot as plt
from skimage.transform import resize
from torch.utils.data import Dataset
from tqdm import tqdm as tqdm
from apex import amp
# In[5]:
# Default Hounsfield window (center / width, in HU) used by the standalone
# DICOM helpers below.  NOTE(review): 40/150 looks like a brain-oriented
# window - confirm the intended preset.
CT_LEVEL = 40
CT_WIDTH = 150
def rescale_pixelarray(dataset):
    """Convert a DICOM dataset's raw pixel array to Hounsfield-like units.

    Applies the standard linear transform
    ``pixels * RescaleSlope + RescaleIntercept`` and clamps everything below
    -1024 (air) up to -1024.  Returns a new array; the dataset is untouched.
    """
    hu = dataset.pixel_array * dataset.RescaleSlope + dataset.RescaleIntercept
    hu[hu < -1024] = -1024
    return hu
def set_manual_window(hu_image, custom_center, custom_width):
    """Clamp a Hounsfield image to the window [center - width/2, center + width/2].

    The array is modified in place and also returned for call-chaining.
    """
    half_width = custom_width / 2
    lower, upper = custom_center - half_width, custom_center + half_width
    hu_image[hu_image < lower] = lower
    hu_image[hu_image > upper] = upper
    return hu_image
# Functions
class IntracranialDataset(Dataset):
    """Torch dataset over the RSNA intracranial-hemorrhage PNG exports.

    Reads an index CSV (one row per image) and serves ``{'image': ...}``
    dicts; when ``labels`` is True each item also carries a 6-way label
    tensor (epidural, intraparenchymal, intraventricular, subarachnoid,
    subdural, any).
    """
    def __init__(self, csv_file, data_dir, labels, ct_level=0, ct_width=0, transform=None):
        # csv_file: path of the index CSV; data_dir: directory of .png files;
        # labels: whether __getitem__ should also return ground truth;
        # ct_level / ct_width: Hounsfield window used only by the (currently
        # unused) DICOM loading path; transform: albumentations pipeline.
        self.data_dir = data_dir
        self.data = pd.read_csv(csv_file)
        self.transform = transform
        self.labels = labels
        self.level = ct_level
        self.width = ct_width
        self.nn_input_shape = (224, 224)   # target H x W for the network
    def __len__(self):
        return len(self.data)
    def resize(self, image):
        # skimage.transform.resize to the fixed network input shape.
        image = resize(image, self.nn_input_shape)
        return image
    def fill_channels(self, image):
        # Replicate the single-channel CT slice into 3 identical channels.
        filled_image = np.stack((image,)*3, axis=-1)
        return filled_image
    def _get_hounsfield_window(self, dicom):
        # Raw pixels -> HU, then clamp to the [level +/- width/2] window.
        hu_image = rescale_pixelarray(dicom)
        windowed_image = set_manual_window(hu_image, self.level, self.width)
        return windowed_image
    def _load_dicom_to_image(self, file_path):
        # Full DICOM path: read, window, resize, stack channels.
        # NOTE(review): dead code at the moment - __getitem__ reads PNGs.
        dicom = pydicom.dcmread(file_path)
        windowed_image = self._get_hounsfield_window(dicom)
        image = self.fill_channels(self.resize(windowed_image))
        return image
    def __getitem__(self, idx):
        file_path = os.path.join(self.data_dir, self.data.loc[idx, 'Image'] + '.png')
        from pathlib import Path
        # NOTE(review): skipping a missing file by recursing on idx + 1 walks
        # off the end of the frame for the last row(s) (.loc raises KeyError)
        # and a long run of gaps could hit the recursion limit - confirm.
        if not Path(file_path).is_file():
            return self.__getitem__(idx + 1)
        # img = self._load_dicom_to_image(file_path)
        img = cv2.imread(file_path)
        if self.transform:
            augmented = self.transform(image=img)
            img = augmented['image']
        if self.labels:
            labels = torch.tensor(
                self.data.loc[idx, ['epidural', 'intraparenchymal', 'intraventricular', 'subarachnoid', 'subdural', 'any']])
            return {'image': img, 'labels': labels}
        else:
            return {'image': img}
# # CSV
# In[7]:
# CSVs
# NOTE(review): this guard only protects the two read_csv calls; everything
# below runs at import time and references `train`/`test` that are defined
# inside the guard, so importing this module raises NameError - it only
# works when executed as a script.  The rest of the file was probably meant
# to be indented under the guard.
if __name__ == '__main__':
    train = pd.read_csv(os.path.join(dir_csv, 'stage_1_train.csv'))
    test = pd.read_csv(os.path.join(dir_csv, 'stage_1_sample_submission.csv'))
# In[8]:
# Split train out into row per image and save a sample
# Stage-1 CSV ids look like 'ID_<image>_<diagnosis>': split them apart and
# pivot so each image becomes one row with six label columns.
train[['ID', 'Image', 'Diagnosis']] = train['ID'].str.split('_', expand=True)
train = train[['Image', 'Diagnosis', 'Label']]
train.drop_duplicates(inplace=True)
train = train.pivot(index='Image', columns='Diagnosis', values='Label').reset_index()
train['Image'] = 'ID_' + train['Image']
train.head()
# In[9]:
undersample_seed=0
train["any"].value_counts()
# In[10]:
num_ill_patients = train[train["any"]==1].shape[0]
num_ill_patients
# In[11]:
# Undersample the healthy class down to the positive count (deterministic
# via the seeded RandomState).
healthy_patients = train[train["any"]==0].index.values
healthy_patients_selection = np.random.RandomState(undersample_seed).choice(
    healthy_patients, size=num_ill_patients, replace=False
)
len(healthy_patients_selection)
# In[12]:
sick_patients = train[train["any"]==1].index.values
selected_patients = list(set(healthy_patients_selection).union(set(sick_patients)))
len(selected_patients)/2
# In[13]:
# NOTE(review): new_train (the balanced sample) is built but never used
# below - training reads train.csv, which is written from the unbalanced
# frame.  Confirm whether balancing was meant to take effect.
new_train = train.loc[selected_patients].copy()
new_train["any"].value_counts()
# In[14]:
# Some files didn't contain legitimate images, so we need to remove them
png = glob.glob(os.path.join(dir_train_img, '*.png'))
png = [os.path.basename(png)[:-4] for png in png]
png = np.array(png)
train = train[train['Image'].isin(png)]
train.to_csv('train.csv', index=False)
# In[15]:
# Also prepare the test data
test[['ID','Image','Diagnosis']] = test['ID'].str.split('_', expand=True)
test['Image'] = 'ID_' + test['Image']
test = test[['Image', 'Label']]
test.drop_duplicates(inplace=True)
test.to_csv('test.csv', index=False)
# # DataLoaders
# In[16]:
# Data loaders
# Train-time augmentation; test-time only crops.  The Resize(224, 224) step
# is commented out in the original - presumably the exported PNGs are
# already sized for the network; confirm.
# NOTE(review): shuffle=False on the *training* loader is unusual - confirm.
transform_train = Compose([CenterCrop(200, 200),
                           #Resize(224, 224),
                           HorizontalFlip(),
                           RandomBrightnessContrast(),
                           ShiftScaleRotate(),
                           ToTensor()
                           ])
transform_test= Compose([CenterCrop(200, 200),
                         #Resize(224, 224),
                         ToTensor()
                         ])
train_dataset = IntracranialDataset(
    csv_file='train.csv', data_dir=dir_train_img, transform=transform_train, labels=True)
test_dataset = IntracranialDataset(
    csv_file='test.csv', data_dir=dir_test_img, transform=transform_test, labels=False)
data_loader_train = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
data_loader_test = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
# In[17]:
len(train_dataset)
# In[18]:
# Plot train example
batch = next(iter(data_loader_train))
fig, axs = plt.subplots(1, 5, figsize=(15,5))
for i in np.arange(5):
    axs[i].imshow(np.transpose(batch['image'][i].numpy(), (1,2,0))[:,:,0], cmap=plt.cm.bone)
# In[19]:
# Plot test example
batch = next(iter(data_loader_test))
fig, axs = plt.subplots(1, 5, figsize=(15,5))
for i in np.arange(5):
    axs[i].imshow(np.transpose(batch['image'][i].numpy(), (1,2,0))[:,:,0], cmap=plt.cm.bone)
# ResNeXt-101 32x8d (weakly-supervised pretraining, via torch.hub) with a
# fresh 6-way head, trained with BCE-with-logits under apex O1 mixed
# precision on a single GPU.
device = torch.device("cuda:0")
model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')
model.fc = torch.nn.Linear(2048, n_classes)
model.to(device)
criterion = torch.nn.BCEWithLogitsLoss()
plist = [{'params': model.parameters(), 'lr': 2e-5}]
optimizer = optim.Adam(plist, lr=2e-5)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
for epoch in range(n_epochs):
    print('Epoch {}/{}'.format(epoch, n_epochs - 1))
    print('-' * 10)
    model.train()
    tr_loss = 0
    tk0 = tqdm(data_loader_train, desc="Iteration")
    for step, batch in enumerate(tk0):
        inputs = batch["image"]
        labels = batch["labels"]
        inputs = inputs.to(device, dtype=torch.float)
        labels = labels.to(device, dtype=torch.float)
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        # Scaled backward pass for mixed precision (replaces the plain
        # loss.backward() kept commented out below).
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        # loss.backward()
        tr_loss += loss.item()
        optimizer.step()
        optimizer.zero_grad()
        # Cut epoch 1 short after ~6000 steps.
        # NOTE(review): after this break the line below still recomputes and
        # reprints epoch_loss with a different denominator - confirm intended.
        if epoch == 1 and step > 6000:
            epoch_loss = tr_loss / 6000
            print('Training Loss: {:.4f}'.format(epoch_loss))
            break
    epoch_loss = tr_loss / len(data_loader_train)
    print('Training Loss: {:.4f}'.format(epoch_loss))
# # Inference
# In[ ]:
# Inference
# Freeze all weights, run the test loader once, and flatten each batch of
# sigmoid outputs into one prediction row per (image, class) pair.
for param in model.parameters():
    param.requires_grad = False
model.eval()
test_pred = np.zeros((len(test_dataset) * n_classes, 1))
for i, x_batch in enumerate(tqdm(data_loader_test)):
    x_batch = x_batch["image"]
    x_batch = x_batch.to(device, dtype=torch.float)
    with torch.no_grad():
        pred = model(x_batch)
    # Slicing by full batch size still works for the short final batch
    # because the right-hand side is reshaped to len(x_batch) rows.
    test_pred[(i * batch_size * n_classes):((i + 1) * batch_size * n_classes)] = torch.sigmoid(
        pred).detach().cpu().reshape((len(x_batch) * n_classes, 1))
# # Submission
# In[ ]:
# Submission
# Rebuild the sample submission with our probabilities in the Label column.
submission = pd.read_csv(os.path.join(dir_csv, 'stage_1_sample_submission.csv'))
submission = pd.concat([submission.drop(columns=['Label']), pd.DataFrame(test_pred)], axis=1)
submission.columns = ['ID', 'Label']
submission.to_csv(f'{Path(__file__).name}_sub.csv', index=False)
submission.head()
| [
"makeffort134@gmail.com"
] | makeffort134@gmail.com |
276172e148598912da58f9737d273252526d5dc8 | 7f1e0158e70b69bfa353661bfb2eabda9ee5c56c | /dnacentersdk/models/validators/v2_2_1/jsd_fe06867e548bba1919024b40d992.py | 6db535faef9734e104abbded4ca2bbe34a05cafe | [
"MIT"
] | permissive | Jerbuck/dnacentersdk | 97fb11844410ec7ab49aec35a30979d6288a87fd | ef2adde6113e7a6acd28a287007eb470fa39d31f | refs/heads/master | 2023-07-31T13:43:01.108243 | 2021-09-14T17:41:19 | 2021-09-14T17:41:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,121 | py | # -*- coding: utf-8 -*-
"""Cisco DNA Center SyncDevices data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorFe06867E548BBa1919024B40D992(object):
"""SyncDevices request schema definition."""
def __init__(self):
super(JSONSchemaValidatorFe06867E548BBa1919024B40D992, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"cliTransport": {
"type": "string"
},
"computeDevice": {
"type": "boolean"
},
"enablePassword": {
"type": "string"
},
"extendedDiscoveryInfo": {
"type": "string"
},
"httpPassword": {
"type": "string"
},
"httpPort": {
"type": "string"
},
"httpSecure": {
"type": "boolean"
},
"httpUserName": {
"type": "string"
},
"ipAddress": {
"items": {
"type": "string"
},
"type": "array"
},
"merakiOrgId": {
"items": {
"type": "string"
},
"type": "array"
},
"netconfPort": {
"type": "string"
},
"password": {
"type": "string"
},
"serialNumber": {
"type": "string"
},
"snmpAuthPassphrase": {
"type": "string"
},
"snmpAuthProtocol": {
"type": "string"
},
"snmpMode": {
"type": "string"
},
"snmpPrivPassphrase": {
"type": "string"
},
"snmpPrivProtocol": {
"type": "string"
},
"snmpROCommunity": {
"type": "string"
},
"snmpRWCommunity": {
"type": "string"
},
"snmpRetry": {
"type": "integer"
},
"snmpTimeout": {
"type": "integer"
},
"snmpUserName": {
"type": "string"
},
"snmpVersion": {
"type": "string"
},
"type": {
"enum": [
"COMPUTE_DEVICE",
"MERAKI_DASHBOARD",
"NETWORK_DEVICE",
"NODATACHANGE"
],
"type": "string"
},
"updateMgmtIPaddressList": {
"items": {
"properties": {
"existMgmtIpAddress": {
"type": "string"
},
"newMgmtIpAddress": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"userName": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
afdb4e602c32338a2e0552721dc3c14860833ca9 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/network/v20200401/get_local_network_gateway.py | 9c6a84c850459931be4dd4496034752ac1f109fa | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 7,651 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetLocalNetworkGatewayResult',
'AwaitableGetLocalNetworkGatewayResult',
'get_local_network_gateway',
]
@pulumi.output_type
class GetLocalNetworkGatewayResult:
"""
A common class for general resource information.
"""
def __init__(__self__, bgp_settings=None, etag=None, fqdn=None, gateway_ip_address=None, id=None, local_network_address_space=None, location=None, name=None, provisioning_state=None, resource_guid=None, tags=None, type=None):
if bgp_settings and not isinstance(bgp_settings, dict):
raise TypeError("Expected argument 'bgp_settings' to be a dict")
pulumi.set(__self__, "bgp_settings", bgp_settings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if fqdn and not isinstance(fqdn, str):
raise TypeError("Expected argument 'fqdn' to be a str")
pulumi.set(__self__, "fqdn", fqdn)
if gateway_ip_address and not isinstance(gateway_ip_address, str):
raise TypeError("Expected argument 'gateway_ip_address' to be a str")
pulumi.set(__self__, "gateway_ip_address", gateway_ip_address)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if local_network_address_space and not isinstance(local_network_address_space, dict):
raise TypeError("Expected argument 'local_network_address_space' to be a dict")
pulumi.set(__self__, "local_network_address_space", local_network_address_space)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="bgpSettings")
def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
"""
Local network gateway's BGP speaker settings.
"""
return pulumi.get(self, "bgp_settings")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def fqdn(self) -> Optional[str]:
"""
FQDN of local network gateway.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="gatewayIpAddress")
def gateway_ip_address(self) -> Optional[str]:
"""
IP address of local network gateway.
"""
return pulumi.get(self, "gateway_ip_address")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="localNetworkAddressSpace")
def local_network_address_space(self) -> Optional['outputs.AddressSpaceResponse']:
"""
Local network site address space.
"""
return pulumi.get(self, "local_network_address_space")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the local network gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the local network gateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetLocalNetworkGatewayResult(GetLocalNetworkGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLocalNetworkGatewayResult(
bgp_settings=self.bgp_settings,
etag=self.etag,
fqdn=self.fqdn,
gateway_ip_address=self.gateway_ip_address,
id=self.id,
local_network_address_space=self.local_network_address_space,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type)
def get_local_network_gateway(local_network_gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLocalNetworkGatewayResult:
"""
A common class for general resource information.
:param str local_network_gateway_name: The name of the local network gateway.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['localNetworkGatewayName'] = local_network_gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200401:getLocalNetworkGateway', __args__, opts=opts, typ=GetLocalNetworkGatewayResult).value
return AwaitableGetLocalNetworkGatewayResult(
bgp_settings=__ret__.bgp_settings,
etag=__ret__.etag,
fqdn=__ret__.fqdn,
gateway_ip_address=__ret__.gateway_ip_address,
id=__ret__.id,
local_network_address_space=__ret__.local_network_address_space,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
c1baffca1113db80056ca413d53c5e5775d023d8 | 402cb8ac32c5ca7a53f5875688d1ebba1e96474b | /set41.py | 022632c87d78cc3aa65a3ee9f15eb2e24c5c22b5 | [] | no_license | Srija-U/codekataplayer | c073a13d8621f641a8aba8f23ebee4e1b673d58f | 392f24f35f178b034cfb76d2acc31bbc4b3a5814 | refs/heads/master | 2020-05-02T10:59:45.052802 | 2019-07-22T00:27:46 | 2019-07-22T00:27:46 | 177,914,184 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | class s:
def __init__(self):
self.items=[]
def push(self,data):
if(data==')'):
self.pop()
else:
self.items.append(data)
def pop(self):
self.items.pop()
def is_empty(self):
return self.items==[]
o=s()
l=str(input())
for i in range(0,len(l),1):
o.push(l[i])
v=0
v=o.is_empty()
if(v==1):
print("yes")
else:
print("no")
| [
"noreply@github.com"
] | Srija-U.noreply@github.com |
6bce468c5760cc045ca616e1d5138f454bd28843 | 9e204a5b1c5ff4ea3b115ff0559b5af803ab4d15 | /019 Remove Nth Node From End of List.py | 41529a7bd2c40f1bb47863031dffc9d358a8a34b | [
"MIT"
] | permissive | Aminaba123/LeetCode | 178ed1be0733cc7390f30e676eb47cc7f900c5b2 | cbbd4a67ab342ada2421e13f82d660b1d47d4d20 | refs/heads/master | 2020-04-20T10:40:00.424279 | 2019-01-31T08:13:58 | 2019-01-31T08:13:58 | 168,795,374 | 1 | 0 | MIT | 2019-02-02T04:50:31 | 2019-02-02T04:50:30 | null | UTF-8 | Python | false | false | 1,269 | py | """
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
"""
__author__ = 'Danyang'
# Definition for singly-linked list.
class ListNode:
    """One node of a singly-linked list."""

    def __init__(self, x):
        # Payload plus a link that starts out detached.
        self.val, self.next = x, None


class Solution:
    """Delete the n-th node counted from the tail of a singly-linked list."""

    def removeNthFromEnd(self, head, n):
        """Two passes: measure the list, then unlink node ``length - n``.

        :param head: head node (may be None)
        :param n: 1-based position from the end; the problem guarantees it is
                  valid, and out-of-range values leave the list untouched
        :return: ListNode, head node of the updated list
        """
        if head is None:
            return head

        # Pass 1: count the nodes.
        length = 0
        walker = head
        while walker:
            length += 1
            walker = walker.next

        if not 1 <= n <= length:
            return head        # out-of-range request: no-op, like the original
        if n == length:
            return head.next   # dropping the head needs no predecessor

        # Pass 2: stop on the predecessor of the victim (index length - n - 1).
        hops = length - n - 1
        walker = head
        while hops > 0:
            walker = walker.next
            hops -= 1
        walker.next = walker.next.next
        return head
| [
"zhangdanyangg@gmail.com"
] | zhangdanyangg@gmail.com |
a71aa04a86453b8f255e54cdc46df17441160ad4 | 5a69cab2b5ed410f944b57f3ec586b9c624a735c | /architect/tcalc/parser.py | 8931a5974bbfe72ea9786aeef762f7c16f91dcc8 | [
"Apache-2.0"
] | permissive | T3kton/architect | 214a176dd5f9a9bc340d358d692e16a61f362ebe | 3368a66c0c1836eca12dbc7af97f01d5ba13984a | refs/heads/master | 2021-01-20T09:03:25.451300 | 2018-09-17T23:03:24 | 2018-09-17T23:03:24 | 90,217,916 | 0 | 2 | Apache-2.0 | 2018-09-17T23:03:24 | 2017-05-04T03:29:18 | Python | UTF-8 | Python | false | false | 15,425 | py | import string
# for now we are going to barrow parsimonious from contractor
from architect.tcalc.parsimonious import Grammar, ParseError, IncompleteParseError
# TODO: make sure a distrubution function is used
tcalc_grammar = """
script = line*
line = definition? nl
definition = ws ( blueprint / variable ) ws ":" expression
expression = ws ( function / infix / boolean / not_ / external / ts_value / blueprint / variable / number_float / number_int ) ws
not_ = ~"[Nn]ot" expression
number_float = ~"[-+]?[0-9]+\.[0-9]+"
number_int = ~"[-+]?[0-9]+"
boolean = ~"[Tt]rue" / ~"[Ff]alse"
function = label "(" ( expression "," )* expression ")"
external = ( "*INDEX*" / "*TOTAL*" / "*COST*" / "*AVAILABILITY*" / "*RELIABILITY*" )
variable = label !"("
infix = "(" expression ( "^" / "*" / "/" / "%" / "+" / "-" / "&" / "|" / "and"/ "or" / "==" / "!=" / "<=" / ">=" / ">" / "<" ) expression ")"
label = ~"[a-zA-Z][a-zA-Z0-9_\-]*"
ts_value = ~"@[a-zA-Z][a-zA-Z0-9_\-]*"
blueprint = ~"#[a-zA-Z0-9][a-zA-Z0-9_\-]*"
ws = ~"[ \t]*"
nl = ~"[\\r\\n]*"
"""
# Maps the *EXTERNAL* tokens usable in a tcalc script to
# ( internal variable name, flag ) for each compiled function.  init() has
# no *INDEX* because it runs once for the whole script rather than per slot.
# NOTE(review): the flag's exact meaning is not visible in this file; for
# init it is True on the per-bucket metrics (cost/availability/reliability),
# which are list-valued there - confirm.
external_lookup = {
    'init': { '*TOTAL*': ( '_T_', False ), '*COST*': ( '_C_', True ), '*AVAILABILITY*': ( '_A_', True ), '*RELIABILITY*': ( '_R_', True ) },  # there is not *INDEX* for the init function
    'main': { '*INDEX*': ( '_I_', False ), '*TOTAL*': ( '_T_', False ), '*COST*': ( '_C_', False ), '*AVAILABILITY*': ( '_A_', False ), '*RELIABILITY*': ( '_R_', False ) },
}
class ParserError( Exception ):
  """Signals that a tcalc script could not be parsed.

  Attributes:
    column: one-based column at which parsing stopped
    msg: short human-readable description of the problem
  """
  def __init__( self, column, msg ):
    self.column = column
    self.msg = msg

  def __str__( self ):
    return f'ParseError, column: {self.column}, "{self.msg}"'
def lint( script ):
  """Convenience wrapper: lint *script* with a throw-away Parser instance.

  Returns None for a clean script, otherwise a string describing the first
  problem encountered.
  """
  return Parser().lint( script )
def parse( script ):
  """Convenience wrapper: parse *script* with a throw-away Parser instance.

  Raises ParserError when the script is invalid; otherwise returns whatever
  Parser.parse produces for the compiled script.
  """
  return Parser().parse( script )
# TODO: subsetting needs to be 100%, otherwise the user doesn't know the maxvalue without too much work.
# NOTE: Function parameters must be numbered, so that the number of required
# arguments is correctly detected.
# Templates for the functions available in tcalc expressions.  Each value is
# a Python expression fragment: positional slots ({0}, {1}, ...) receive the
# compiled argument expressions and {ID} receives a unique id tying a call
# to its per-call init snippet.  A template evaluates to the slot value when
# the slot qualifies, or to False when it does not.
function_map = {
    # distribution
    'periodic': '( {0} ) if not bool( {0} % {1} ) else False',
    # 'linear': 'bool( {0} == math.ceil( math.floor( ( {0} * ( {1} / _T_ ) ) + 0.00000000001 ) * ( _T_ / {1} ) ) )',
    'linear': '( {0} ) if ( {0} ) in linear_slots_{ID} else False',
    'weighted': '( {0} ) if ( {0} ) in weighted_slots_{ID} else False',
    # subsetting
    'above': '( {0} ) if ( {0} ) > ( {1} ) and {0} is not False and {1} is not False else False',
    'below': '( {0} ) if ( {0} ) < ( {1} ) and {0} is not False and {1} is not False else False',
    'above_inclusive': '( {0} ) if ( {0} ) >= ( {1} ) and {0} is not False and {1} is not False else False',
    'below_inclusive': '( {0} ) if ( {0} ) <= ( {1} ) and {0} is not False and {1} is not False else False',
    'filter': '( {0} ) if ( {1} ) else False',
}
function_init_map = {
'linear': """
import math
global linear_slots_{ID}
if {1} > _T_ or {1} < 1:
raise ValueError( 'Count should be more than 0 and less than the number of slots' )
linear_slots_{ID} = [ math.ceil( i * safe_div( _T_, {1} ) ) for i in range( 0, int( {1} ) ) ]
""",
'weighted': """
import math
global weighted_slots_{ID}
avg_weight = float( sum( {2} ) ) / len( {2} )
counter = 0
weighted_slots_{ID} = []
for i in range( 0, int( {1} ) ):
bucket = int( counter / SLOTS_PER_BUCKET )
try:
interval = ( _T_ * ( avg_weight / {2}[ bucket ] ) ) / {1}
interval = min( interval, SLOTS_PER_BUCKET )
except ( ZeroDivisionError, TypeError ):
interval = SLOTS_PER_BUCKET
if interval < 1:
raise ValueError( 'interval drops below 1' )
counter += interval
if counter > _T_:
raise ValueError( 'Counter exceted total' )
weighted_slots_{ID}.append( math.ceil( counter ) )
"""
}
"""
Functions:
number per complex : number of instances per complex = X % ( buckets per complex / count )
weighted distrubution : = weighted value of curent complex / take the avertage of all the weights values -> factor that into even distrubution
complex subset: prune down to bucekts that are in or out of list of complexes
for a/b testing: need above, below, every X
periodic: should produce the same result as [ INDEX for INDEX in range( 0, total_slots ) if not bool( INDEX % _PARAM_ ) ] where *INDEX* is the first paramater
def periodic( i ):
return not bool( i % PARAM )
( for non pre-computed version )
linear: should produce the same result as [ math.ceil( INDEX * ( total_slots / _PARAM_ ) ) for INDEX in range( 0, _PARAM_ ) ] where *INDEX* is the first paramater
def linear( i ):
return i == math.ceil( math.floor( ( i * ( COUNT / slot_count ) ) + 0.00000000001 ) * ( slot_count / COUNT ) )
NOTE: the magic number is to compensate for some small round down during the division
NOTE2: it is important that ( COUNT / slot_count ) be >= 1
"""
# TODO: how to do a dirivitave or integrated value....
# some kind of remember last value for a function
# mabey all the functions should exist in the same
# blob, and have a save for next time value per index
# if the functions get to involved we are going to need macros
# mabey for the first round we skip the PD controller and just scale
# with historsis
class Script():
  """A compiled tcalc script plus the bucket/timeseries context needed to run it.

  ``code`` is the namespace produced by Parser.parse(); it must expose
  ``init( _T_, _C_, _A_, _R_, ts_map )`` and
  ``main( _I_, _T_, _C_, _A_, _R_, ts_map )`` callables (and a
  ``__builtins__`` mapping for evaluate()).
  """
  def __init__( self, code ):
    super().__init__()
    self.code = code
    self.slots_per_bucket = None   # set by setBuckets()
    self.ts_value_map = None       # set by setTimeSeriesValues()

  def setBuckets( self, slots_per_bucket, bucket_cost, bucket_availability, bucket_reliability ):
    """Define the bucket layout and the per-bucket metrics.

    slots_per_bucket: int in [1, 10000].
    bucket_cost / bucket_availability / bucket_reliability: equal-length
    lists (1 to 1000 entries), one value per bucket.
    Raises ValueError for any invalid argument.
    """
    if not isinstance( slots_per_bucket, int ) or slots_per_bucket < 1 or slots_per_bucket > 10000:
      raise ValueError( 'slots_per_bucket must be int and from 1 to 10000' )
    if not isinstance( bucket_cost, list ) or not isinstance( bucket_availability, list ) or not isinstance( bucket_reliability, list ):
      raise ValueError( 'bucket_cost, bucket_availability, and bucket_reliability must be list' )
    # Bug fix: the original chained `len( a ) != len( b ) != len( c )` missed
    # the case where the first two lengths matched but the third did not;
    # compare all three lengths explicitly.
    if not ( len( bucket_cost ) == len( bucket_availability ) == len( bucket_reliability ) ) or len( bucket_cost ) < 1 or len( bucket_cost ) > 1000:
      raise ValueError( 'bucket_cost, bucket_availability, and bucket_reliability must be the same size, and length from 1 to 1000' )
    self.slots_per_bucket = slots_per_bucket
    self.bucket_cost = bucket_cost
    self.bucket_availability = bucket_availability
    self.bucket_reliability = bucket_reliability
    self.total_slots = len( self.bucket_cost ) * self.slots_per_bucket

  def setTimeSeriesValues( self, ts_value_map ):
    """Supply the map of timeseries values the script references (@name)."""
    if not isinstance( ts_value_map, dict ):
      raise ValueError( 'external_value_map must be a dict' )
    self.ts_value_map = ts_value_map

  def _evaluate( self, bucket, index ):
    """Run main() for one slot and normalize every result value to a bool.

    A value counts as True when it is a number >= 0; False, None, or a
    negative number mean "no".  A missing map key inside main() surfaces as
    ValueError.
    """
    try:
      value_map = self.code[ 'main' ]( index, self.total_slots, self.bucket_cost[ bucket ], self.bucket_availability[ bucket ], self.bucket_reliability[ bucket ], self.ts_value_map )
    except KeyError as e:
      raise ValueError( 'value "{0}" not defined'.format( e.args[0] ) )
    # yes this is a bit odd, however '0' is a valid bucket/offset, so 0 must
    # stay truthy here.
    # Bug fix: test `is not False` / `is not None` *before* `>= 0`; the
    # original order evaluated `None >= 0` first, which raises TypeError on
    # Python 3 instead of treating the value as False.
    return dict( zip( value_map.keys(), [ i is not False and i is not None and i >= 0 for i in value_map.values() ] ) )

  def evaluate( self ):
    """Evaluate every slot; return { value name: [ matching slot indexes ] }.

    Requires setBuckets() and setTimeSeriesValues() to have been called.
    """
    if self.slots_per_bucket is None:
      raise ValueError( 'bucket info has not been set' )
    if self.ts_value_map is None:
      raise ValueError( 'timeseries values have not been set' )
    self.code[ '__builtins__' ][ 'SLOTS_PER_BUCKET' ] = self.slots_per_bucket
    self.code[ 'init' ]( self.total_slots, self.bucket_cost, self.bucket_availability, self.bucket_reliability, self.ts_value_map )
    # Probe slot 0 to learn the value names, then scan every slot in order.
    result_name_list = self._evaluate( 0, 0 ).keys()
    result = dict( zip( result_name_list, [ [] for i in result_name_list ] ) )
    index = 0
    for bucket in range( 0, len( self.bucket_cost ) ):
      for offset in range( 0, self.slots_per_bucket ):
        tmp = self._evaluate( bucket, index )
        for key, value in tmp.items():
          if value:
            result[ key ].append( index )
        index += 1
    return result
class Parser():
  """Parse tcalc source into an executable Script.

  A parsimonious Grammar is built from the module-level ``tcalc_grammar``.
  Each grammar rule has a same-named handler method here which emits a
  fragment of Python source text; ``parse`` assembles the fragments into
  generated ``init`` and ``main`` functions, compiles them, and wraps the
  resulting namespace in a Script object.
  """
  def __init__( self ):
    super().__init__()
    self.grammar = Grammar( tcalc_grammar )
    # ( name, init_id, paramaters ) for each function needing init-time setup.
    self.function_initer_list = []
    # Either 'main' or 'init' - controls which externals are legal while
    # walking the AST (see external()/variable()/blueprint()).
    self.mode = 'main'
  def lint( self, script ):
    """Parse *script*; return an error description string, or None if clean."""
    self.function_initer_list = []
    self.mode = 'main'
    try:
      ast = self.grammar.parse( script )
    except IncompleteParseError as e:
      return 'Incomplete Parsing at column: {0}'.format( e.column() )
    except ParseError as e:
      return 'Error Parsing at column: {0}'.format( e.column() )
    try:
      # Walk the AST purely for its side effect of raising on bad constructs.
      self._eval( ast )
    except Exception as e:
      return 'Exception Parsing "{0}"'.format( e )
    return None
  def parse( self, script ):
    """Compile *script* and return a Script. Raises ParserError on bad input."""
    self.function_initer_list = []
    self.mode = 'main'
    try:
      ast = self.grammar.parse( script )
    except IncompleteParseError as e:
      raise ParserError( e.column(), 'Incomplete Parse' )
    except ParseError as e:
      raise ParserError( e.column(), 'Error Parsing' )
    init = ''
    body = self._eval( ast )
    # Generated module preamble: a division helper that never raises, plus
    # the init() function header.  Function-specific init code is appended.
    init = """import sys
def safe_div( numerator, denominator ):
  if denominator == 0:
    return sys.maxsize
  return numerator / denominator
def init( _T_, _C_, _A_, _R_, ts_map ):
"""
    if self.function_initer_list:
      for name, init_id, paramaters in self.function_initer_list:
        init += function_init_map[ name ].format( *paramaters, ID=init_id )
    else:
      init += ' pass'
    script = init + """
def main( _I_, _T_, _C_, _A_, _R_, ts_map ):
  b_map = {}
  v_map = {}
""" + body + """
  return b_map
"""
    # Debugging aid: dump the generated source before compiling it.
    print( '*****************\n{0}\n*******************'.format( script ) )
    tmp = {}
    exec( compile( script, '<string>', 'exec' ), tmp )
    return Script( tmp )
  def _eval( self, node ):
    """Dispatch *node* to the handler method named after its grammar rule."""
    if node.expr_name in ( 'ws', 'nl' ): # ignore white space
      return ''
    if node.expr_name == '':
      # Anonymous wrapper node - descend into its single child.
      return self._eval( node.children[0] )
    try:
      handler = getattr( self, node.expr_name )
    except AttributeError:
      raise Exception( 'Unable to find handler for "{0}"'.format( node.expr_name ) )
    return handler( node )
  def script( self, node ):
    # Emit one line of generated code per top-level definition.
    if not node.children:
      return ''
    definition_list = [ self._eval( child ) for child in node.children ]
    return '\n'.join( definition_list )
  def line( self, node ):
    if len( node.children[0].children ) == 0:
      return ''
    return self._eval( node.children[0] )
  def definition( self, node ):
    # target <- value, rendered as an indented assignment in main().
    target = self._eval( node.children[1] )
    value = self._eval( node.children[4] )
    return '  {0} = {1}'.format( target, value )
  def expression( self, node ):
    return self._eval( node.children[1] )
  def number_float( self, node ):
    return '{0}'.format( float( node.text ) )
  def number_int( self, node ):
    # NOTE(review): integer literals are converted with float(), so '3'
    # renders as '3.0' in the generated code -- presumably int() was
    # intended; confirm before changing.
    return '{0}'.format( float( node.text ) )
  def boolean( self, node ):
    return '{0}'.format( True if node.text.lower() == 'true' else False )
  def external( self, node ):
    # Map an external name to its generated expression; availability depends
    # on whether we are emitting 'main' or 'init' code.
    name = node.text
    try:
      lookup = external_lookup[ self.mode ][ name ]
    except KeyError:
      raise ValueError( 'External "{0}" not allowed here'.format( name ) )
    if lookup[1]:
      # lookup[1] flags a list-valued external; wrap it in brackets so
      # infix() recognises it as a list expression.
      return '[ {0} ]'.format( lookup[0] )
    else:
      return lookup[0]
  def ts_value( self, node ):
    # Timeseries reference: strip the leading sigil and index into ts_map.
    name = node.text
    return 'ts_map[ \'{0}\' ]'.format( name[ 1: ] )
  def variable( self, node ):
    name = node.text
    if self.mode == 'init':
      raise ValueError( 'Unable to refrence values in init' )
    return 'v_map[ \'{0}\' ]'.format( name )
  def blueprint( self, node ):
    # Blueprint reference: strip the leading sigil and index into b_map.
    name = node.text
    if self.mode == 'init':
      raise ValueError( 'Unable to refrence values in init' )
    return 'b_map[ \'{0}\' ]'.format( name[ 1: ] )
  def infix( self, node ):
    """Emit code for a binary expression, broadcasting over list operands.

    NOTE(review): left_list/right_list are booleans, so the
    ``len( left_list ) != len( right_list )`` checks below would raise
    TypeError whenever both operands are lists -- list lengths are not
    knowable at code-generation time; confirm intent before relying on
    the list/list paths.
    """
    left = self._eval( node.children[1] )
    right = self._eval( node.children[3] )
    left_list = False
    right_list = False
    # A leading '[' marks a list expression; unwrap it for re-embedding.
    if left[0] == '[':
      left_list = True
      left = left[ 1:-1 ]
    if right[0] == '[':
      right_list = True
      right = right[ 1:-1 ]
    operator = node.children[2].text
    if operator == '/':
      # Division goes through safe_div() so the generated code never raises
      # ZeroDivisionError.
      if left_list and right_list:
        if len( left_list ) != len( right_list ):
          raise ValueError( 'left and right lists are not the same length' )
        return '[ safe_div( {0[i]}, {2}[i] ) for i in range( 0, len( {0} ) ) ]'.format( left, '', right )
      elif left_list:
        return '[ safe_div( i, {2} ) for i in {0} ]'.format( left, '', right )
      elif right_list:
        return '[ safe_div( {0}, i ) for i in {2} ]'.format( left, '', right )
      return 'safe_div( {0}, {1} )'.format( left, right )
    else:
      value = '( {0} {1} {2} )'
      if left_list and right_list:
        if len( left_list ) != len( right_list ):
          raise ValueError( 'left and right lists are not the same length' )
        value = '[ {0}[ i ] {1} {2}[ i ] for i in range( 0, len( {0} ) ) ]'
      elif left_list:
        value = '[ i {1} {2} for i in {0} ]'
      elif right_list:
        value = '[ {0} {1} i for i in {2} ]'
      return value.format( left, operator, right )
  def not_( self, node ):
    return 'not bool( {0} )'.format( self._eval( node.children[1] ) )
  def function( self, node ):
    """Emit a call to a built-in tcalc function from function_map.

    Functions listed in function_init_map also get a one-off setup fragment
    appended to the generated init(); each such use is given a unique ID so
    init-time state can be matched to the call site in main().
    """
    name = node.children[0].text
    param_value_list = []
    children = list( node.children[2] )
    children.append( node.children[3] )
    for child in children:
      param_value_list.append( self._eval( child ) )
    try:
      func_body = function_map[ name ]
    except KeyError:
      raise ValueError( 'Unknown function "{0}"'.format( name ) )
    init_id = None
    if name in function_init_map:
      # Re-evaluate only the parameters the init template actually uses,
      # in 'init' mode so main-only externals are rejected.
      self.mode = 'init'
      used_parm_list = _getFormatIds( function_init_map[ name ] )
      init_param_value_list = []
      for i in range( 0, len( children ) ):
        if i not in used_parm_list:
          init_param_value_list.append( None )
        else:
          child = children[i]
          value = self._eval( child )
          if value.startswith( '[ _' ):
            # Unwrap list-marker brackets from list-valued externals.
            value = value[ 1:-1 ]
          init_param_value_list.append( value )
      init_id = 'id{0}'.format( len( self.function_initer_list ) )
      self.function_initer_list.append( ( name, init_id, init_param_value_list ) )
      self.mode = 'main'
    # The highest positional format id used by the templates determines how
    # many parameters the function expects.
    max_paramater = max( [0] + _getFormatIds( func_body ) )
    if init_id is not None:
      max_paramater = max( [ max_paramater ] + _getFormatIds( function_init_map[ name ] ) )
    max_paramater += 1
    if len( param_value_list ) != max_paramater:
      raise ValueError( 'Expected {0} paramaters, got {1}'.format( max_paramater, len( param_value_list ) ) )
    if init_id is not None:
      return function_map[ name ].format( *param_value_list, ID=init_id )
    else:
      return function_map[ name ].format( *param_value_list )
def _getFormatIds( scriptlet ):
return [ int( v[1] ) for v in string.Formatter().parse( scriptlet ) if v[1] is not None and v[1] not in ( 'ID', ) ]
| [
"pnhowe@gmail.com"
] | pnhowe@gmail.com |
5eca7cbd6d0508a7363e93d232e67393cf4c08ac | 2f0cb310e2ec8fb176ee240aa964a7eef5ed23b4 | /giico/giico/report/fuel_card_summary/fuel_card_summary.py | 229f2dfacad40fdcf6e938782dfe93c93ae34bb8 | [
"MIT"
] | permissive | thispl/giico | b96cf6b707f361275f8723d15f8ea1f95f908c9c | 14c5631639ab56a586a7962be9871d722c20e205 | refs/heads/master | 2021-06-18T03:56:02.928303 | 2021-04-27T06:42:59 | 2021-04-27T06:42:59 | 200,183,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | # Copyright (c) 2013, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
def execute(filters=None):
	"""Report entry point expected by the framework.

	This report is a stub: it ignores *filters* and returns an empty
	column list and an empty data list.
	"""
	return [], []
| [
"hereabdulla@gmail.com"
] | hereabdulla@gmail.com |
3eb75a3150a4dcc08540c10af5f332ba2603423b | 776c8a5821eb8cd1357439454c9c20c9da239afb | /November,2020~July,2021/2021-01-01/9273_이승민_정제천을팔자_시간초과.py | 141f915d871d9eea921da90528b080d83b9a24e8 | [] | no_license | JinYeJin/algorithm-study | 85d84a726e0f7bb78a2da37504bc04a42b3906ea | 538c911e6adcdad3bfed3d9f76ccb30804dfb768 | refs/heads/master | 2023-07-04T16:09:12.101837 | 2021-08-14T02:23:44 | 2021-08-14T02:23:44 | 272,363,049 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # 정제헌을 팔자!
# 구현 아이디어 : n ** 2의 n까지의 약수의 개수를 구하기
import sys
input = sys.stdin.readline
def solution():
while 1:
arr = list(map(str, input()))
tmp = "".join(map(str, arr[2:]))
n = int(tmp)
cnt = 0
for i in range(1, n + 1):
if n ** 2 % i == 0:
cnt += 1
print(cnt)
solution() | [
"yeen0606@gmail.com"
] | yeen0606@gmail.com |
3ed1cf66d9c905bf48476a9373aa40591ee6fb9d | 6e6f97f416c06aada38c3a9db23eed7517bfaa6d | /accounts/migrations/0009_remove_profile_image_url.py | c34102ddb2e9e7f6d4d141d5c54b1eef76f1406d | [
"MIT"
] | permissive | ZendaInnocent/sogea | 1735ad047539c09a5c81e196a7a1963022452098 | 54cf257856cae451ad87e2396b8e44a34c0c6daf | refs/heads/main | 2023-08-23T07:18:45.741826 | 2021-10-28T13:19:06 | 2021-10-28T13:19:06 | 365,683,816 | 0 | 0 | MIT | 2021-05-09T06:29:57 | 2021-05-09T06:29:57 | null | UTF-8 | Python | false | false | 332 | py | # Generated by Django 3.1.7 on 2021-03-17 14:24
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``image_url`` field from ``Profile`` (reverses migration 0008)."""
    # Must run after the migration that added the field.
    dependencies = [
        ('accounts', '0008_profile_image_url'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='image_url',
        ),
    ]
| [
"medsonnaftal@gmail.com"
] | medsonnaftal@gmail.com |
d02f7c02234dae46ada1d4880986741d24acb10c | 9cdccd361d2cc778e049ec16008bc457ebaba8ad | /wef/wef/views/home.py | 40860b43c7da518eae00ae33f9afe37adb572616 | [
"MIT"
] | permissive | deadlylaid/book_connect | 2c41c713c63927df27436038c26eb37d70e8b099 | a024363ed1ab06fbb21a9b5da6a04eda9d7dfb35 | refs/heads/master | 2022-11-24T23:37:34.679963 | 2020-12-23T07:10:30 | 2020-12-23T07:10:30 | 63,302,135 | 6 | 0 | MIT | 2022-11-22T01:21:30 | 2016-07-14T04:25:18 | Python | UTF-8 | Python | false | false | 679 | py | from django.views.generic import View
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator
from items.models import ItemPost
class Home(View):
    """Landing page view: shows the most recent, non-deleted item posts."""
    def get(self, request):
        # NOTE(review): the page number is hard-coded to 1, so only the first
        # page of results is ever shown -- presumably this should come from
        # request.GET; confirm before changing.
        page = 1
        # Number of posts shown per page.
        page_per = 5
        paginator = Paginator(
            ItemPost.objects.filter(is_deleted=False).order_by('-id'),
            page_per,
        )
        posts = paginator.page(page)
        context = {
            "posts": posts
        }
        return render(
            request,
            'home.html',
            context,
        )
| [
"deadlylaid@gmail.com"
] | deadlylaid@gmail.com |
1fe706abe282ddddde6afc011ea1a12d9120f990 | 03f6ad21c4332b9b26dfb11ed04e63bdb9236b3c | /benchmarks/benchmark_log.py | 4c7aca41e6a2d68e1e4b8e45974fd7b414720227 | [
"Apache-2.0"
] | permissive | m1griffin/arrayfunc | ddf9ea9c8fa363f79babd788c8d0428ede8dfc60 | c04561c5d565ae8d3ee776783bfb34b242deca93 | refs/heads/master | 2023-08-05T00:09:27.530893 | 2023-07-19T12:46:37 | 2023-07-19T12:46:37 | 40,577,669 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,718 | py | #!/usr/bin/env python3
##############################################################################
# Project: arrayfunc
# Module: benchmark_log.py
# Purpose: Benchmark tests for 'arrayfunc' functions.
# Language: Python 3.5
# Date: 20-Dec-2018.
# Ver: 07-Sep-2021.
#
###############################################################################
#
# Copyright 2014 - 2021 Michael Griffin <m12.griffin@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
##############################################################################
import time
import array
import itertools
import math
import platform
import json
import collections
import argparse
import arrayfunc
##############################################################################
########################################################
def InitOptionData(arraycode, arraysize, funcname):
	"""Initialise the data used only for some tests.

	This function is emitted by the benchmark code generator; the string
	comparisons against 'log' below are resolved at generation time, so for
	this file most branches are constant.  Note that the namedtuple *class*
	itself is used as a mutable attribute container - no instance is ever
	created; callers only read attributes from the returned object.
	"""
	odata = collections.namedtuple('optiondata', ['truediv_type', 'ldexp_y',
			'compval', 'pycomp', 'startcycle', 'endcycle',
			'invertmaxval', 'invertop', 'fidataout'])
	optiondata = odata
	# Ensure the data is in the right format for the array type.
	if arraycode in ('f', 'd'):
		optiondata.truediv_type = float
	else:
		optiondata.truediv_type = int
	# Function ldexp needs a specific array type as the second parameter.
	# (Dead for 'log'; note the empty ydata would raise IndexError if taken.)
	if funcname == 'ldexp':
		ydata = []
		optiondata.ldexp_y = int(ydata[-1])
	else:
		optiondata.ldexp_y = None
	# This is used for some tests.
	if arraycode in ('f', 'd'):
		optiondata.compval = float(0)
	else:
		optiondata.compval = int(0)
	# Used for compress.  (Constant-false for 'log'.)
	if 'log' == 'compress':
		optiondata.compdata = array.array(arraycode, [1,0,1,0])
		optiondata.pycomp = array.array(arraycode, (x for x,y in zip(itertools.cycle(optiondata.compdata), itertools.repeat(0, arraysize))))
	else:
		optiondata.compdata = None
		optiondata.pycomp = None
	# Used for cycle.  (Constant-false for 'log'.)
	if 'log' == 'cycle':
		optiondata.startcycle = comptype(arraycode, 0)
		optiondata.endcycle = comptype(arraycode, 127)
	else:
		optiondata.startcycle = None
		optiondata.endcycle = None
	# Used for invert.  (Constant-false for 'log'.)
	if 'log' == 'invert':
		optiondata.invertmaxval = allinvertlimits[arraycode]
		if arraycode in ('b', 'h', 'i', 'l', 'q'):
			optiondata.invertop = invertpysigned
		else:
			optiondata.invertop = invertpyunsigned
	else:
		optiondata.invertmaxval = None
		optiondata.invertop = None
	# Used for findindices.
	# NOTE(review): ('dataout') is a plain string, not a tuple, so this is a
	# substring test and 'fidataout' in 'dataout' is always False here --
	# presumably ('dataout',) was intended by the generator; fidataout is
	# therefore always None for this function.
	if 'fidataout' in ('dataout'):
		optiondata.fidataout = array.array('q', itertools.repeat(0, arraysize))
	else:
		optiondata.fidataout = None
	return optiondata
########################################################
def InitDataArrays(arraycode, arraysize):
	"""Create and fill the data arrays used to run the benchmarks.

	Returns an attribute container exposing:
	  datax       - source array of *arraysize* elements, cycling 1..10,
	                converted to float or int to suit *arraycode*.
	  dataout     - output array of the same length, zero filled.
	  yvalue /
	  zvalue      - scalar operands; None here because this function takes
	                no 'y' or 'z' data.
	  arraylength - number of elements in datax.
	"""
	holder = collections.namedtuple('arraydata', ['datax', 'dataout',
						'yvalue', 'zvalue', 'arraylength'])
	arraydata = holder

	# Convert the seed values to the element type matching the array code.
	seedvalues = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
	if arraycode in ('f', 'd'):
		xdata = [float(v) for v in seedvalues]
	else:
		xdata = [int(v) for v in seedvalues]

	# Fill the source array by cycling the seed data up to the requested size.
	arraydata.datax = array.array(arraycode, itertools.islice(itertools.cycle(xdata), arraysize))
	assert len(arraydata.datax) == arraysize, 'datax is not expected length %d' % len(arraydata.datax)
	arraydata.arraylength = len(arraydata.datax)

	# This function takes no 'y' or 'z' operand data, so the scalar
	# operands are left unset.
	arraydata.yvalue = None
	arraydata.zvalue = None

	# This function writes its results into a separate output array.
	arraydata.dataout = array.array(arraycode, itertools.repeat(0, arraydata.arraylength))
	assert len(arraydata.dataout) == arraysize, 'dataout is not expected length %d' % len(arraydata.dataout)

	return arraydata
########################################################
def calibrateruntime(arraycode, arraysize, arraydata, optiondata, runtimetarget):
	"""Calibrate iteration counts for the Python and default ArrayFunc runs.

	Does one short timing run of each implementation, then scales the
	iteration counts so the real benchmark runs take approximately
	*runtimetarget* seconds.  Returns (pyitercounts, afitercounts), each
	clamped to a minimum of 1.
	"""
	# Short calibration runs with small fixed iteration counts.
	pytime = BenchmarkPython(1, arraycode, arraysize, arraydata, optiondata)
	aftime = BenchmarkAF(50, arraycode, arraydata, optiondata)

	# The benchmark functions report per-iteration time, so the target count
	# is simply target / per-iteration, clamped so we always run at least once.
	pyitercounts = max(1, int(runtimetarget / pytime))
	afitercounts = max(1, int(runtimetarget / aftime))

	return pyitercounts, afitercounts
########################################################
def calibratesimdruntime(arraycode, arraydata, optiondata, runtimetarget):
	"""Calibrate the iteration count for the SIMD-disabled benchmark run.

	Performs one short timing run, then returns the iteration count needed
	to run for approximately *runtimetarget* seconds, clamped to at least 1.
	"""
	aftimenosimd = BenchmarkAFErrTrueSimdFalse(50, arraycode, arraydata, optiondata)
	return max(1, int(runtimetarget / aftimenosimd))
########################################################
def BenchmarkPython(pyitercounts, arraycode, arraysize, arraydata, optiondata):
	"""Measure execution time of native Python code.

	Runs math.log element-by-element over datax for *pyitercounts*
	iterations and returns the average time per iteration in seconds.
	"""
	# This is used for some tests only.
	result = True
	# We provide a local reference to the arrays to make the representation simpler.
	# The full set of locals is unpacked by the benchmark code generator;
	# only datax, dataout and arraylength are used for the 'log' function.
	datax = arraydata.datax
	dataout = arraydata.dataout
	yvalue = arraydata.yvalue
	zvalue = arraydata.zvalue
	arraylength = arraydata.arraylength
	# Used for ldexp only.
	ldexp_y = optiondata.ldexp_y
	compval = optiondata.compval
	truediv_type = optiondata.truediv_type
	fidataout = optiondata.fidataout
	startcycle = optiondata.startcycle
	endcycle = optiondata.endcycle
	pycomp = optiondata.pycomp
	compdata = optiondata.compdata
	invertmaxval = optiondata.invertmaxval
	invertop = optiondata.invertop
	# Time for python.
	starttime = time.perf_counter()
	# 'if True' is resolved by the code generator: for 'log' the per-element
	# loop form is always taken.
	if True:
		for x in range(pyitercounts):
			for i in range(arraylength):
				dataout[i] = math.log(datax[i])
	else:
		# NOTE(review): this branch is unreachable (the condition above is the
		# literal True) and references loop index 'i' that would be unbound --
		# dead code left behind by the generator.
		for x in range(pyitercounts):
			dataout[i] = math.log(datax[i])
	endtime = time.perf_counter()
	pythontime = (endtime - starttime) / pyitercounts
	return pythontime
########################################################
def BenchmarkAF(afitercounts, arraycode, arraydata, optiondata):
	"""Measure execution time for arrayfunc with defaults.

	Calls arrayfunc.log(datax, dataout) for *afitercounts* iterations and
	returns the average time per iteration in seconds.
	"""
	# This is used for some tests only.
	result = True
	# We provide a local reference to the arrays to make the representation simpler.
	# Most of these locals are unpacked by the benchmark code generator;
	# only datax and dataout are used for the 'log' function.
	datax = arraydata.datax
	dataout = arraydata.dataout
	yvalue = arraydata.yvalue
	zvalue = arraydata.zvalue
	# Used for ldexp only.
	ldexp_y = optiondata.ldexp_y
	compval = optiondata.compval
	fidataout = optiondata.fidataout
	startcycle = optiondata.startcycle
	endcycle = optiondata.endcycle
	pycomp = optiondata.pycomp
	compdata = optiondata.compdata
	# Time for arrayfunc version.
	starttime = time.perf_counter()
	for i in range(afitercounts):
		arrayfunc.log(datax, dataout)
	endtime = time.perf_counter()
	aftime = (endtime - starttime) / afitercounts
	return aftime
########################################################
def BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, arraydata, optiondata):
	"""Measure execution time for arrayfunc with MathErrors ignored and SIMD turned off.

	For 'log' the call only passes matherrors=True; there is no SIMD
	parameter for this function, so "SIMD turned off" in the generated name
	has no effect on the call itself.  Returns the average time per
	iteration in seconds.
	"""
	# This is used for some tests only.
	result = True
	# We provide a local reference to the arrays to make the representation simpler.
	# Most of these locals are unpacked by the benchmark code generator;
	# only datax and dataout are used for the 'log' function.
	datax = arraydata.datax
	dataout = arraydata.dataout
	yvalue = arraydata.yvalue
	zvalue = arraydata.zvalue
	# Used for ldexp only.
	ldexp_y = optiondata.ldexp_y
	compval = optiondata.compval
	fidataout = optiondata.fidataout
	startcycle = optiondata.startcycle
	endcycle = optiondata.endcycle
	pycomp = optiondata.pycomp
	compdata = optiondata.compdata
	# Time for arrayfunc version.
	starttime = time.perf_counter()
	for i in range(afitercounts):
		arrayfunc.log(datax, dataout, matherrors=True)
	endtime = time.perf_counter()
	aftime = (endtime - starttime) / afitercounts
	return aftime
########################################################
def BenchmarkAFErrFalseSimdTrue(afitercounts, arraycode, arraydata, optiondata):
	"""Measure execution time for arrayfunc with SIMD turned off.

	For 'log' the call is identical to the default benchmark (no SIMD
	parameter exists for this function); the variant is kept so all
	generated benchmarks share the same structure.  Returns the average
	time per iteration in seconds.
	"""
	# This is used for some tests only.
	result = True
	# We provide a local reference to the arrays to make the representation simpler.
	# Most of these locals are unpacked by the benchmark code generator;
	# only datax and dataout are used for the 'log' function.
	datax = arraydata.datax
	dataout = arraydata.dataout
	yvalue = arraydata.yvalue
	zvalue = arraydata.zvalue
	# Used for ldexp only.
	ldexp_y = optiondata.ldexp_y
	compval = optiondata.compval
	fidataout = optiondata.fidataout
	startcycle = optiondata.startcycle
	endcycle = optiondata.endcycle
	pycomp = optiondata.pycomp
	compdata = optiondata.compdata
	# Time for arrayfunc version.
	starttime = time.perf_counter()
	for i in range(afitercounts):
		arrayfunc.log(datax, dataout)
	endtime = time.perf_counter()
	aftime = (endtime - starttime) / afitercounts
	return aftime
########################################################
def BenchmarkAFErrTrueSimdFalse(afitercounts, arraycode, arraydata, optiondata):
	"""Measure execution time for arrayfunc with matherrors=True.

	Returns the average time per iteration in seconds.
	"""
	# This is used for some tests only.
	result = True
	# We provide a local reference to the arrays to make the representation simpler.
	# Most of these locals are unpacked by the benchmark code generator;
	# only datax and dataout are used for the 'log' function.
	datax = arraydata.datax
	dataout = arraydata.dataout
	yvalue = arraydata.yvalue
	zvalue = arraydata.zvalue
	# Used for ldexp only.
	ldexp_y = optiondata.ldexp_y
	compval = optiondata.compval
	fidataout = optiondata.fidataout
	startcycle = optiondata.startcycle
	endcycle = optiondata.endcycle
	pycomp = optiondata.pycomp
	compdata = optiondata.compdata
	# Time for arrayfunc version.
	starttime = time.perf_counter()
	for i in range(afitercounts):
		arrayfunc.log(datax, dataout, matherrors=True)
	endtime = time.perf_counter()
	aftime = (endtime - starttime) / afitercounts
	return aftime
##############################################################################
def GetCmdArguments():
	""" Get any command line arguments. These modify the operation of the program.
	rawoutput = If specified, will output raw data instead of a report.
	mintest = If specified, will do a minimal test.
	arraysize = Size of the array in elements.
	runtimetarget = The target length of time in seconds to run a benchmark for.
	"""
	parser = argparse.ArgumentParser()

	# Output just the raw data as JSON rather than a formatted report.
	parser.add_argument('--rawoutput', action = 'store_true', help = 'Output raw data.')

	# Do a minimal test only, which saves time when full results are not required.
	parser.add_argument('--mintest', action = 'store_true', help = 'Do minimal test.')

	# Size of the test arrays in elements.
	parser.add_argument('--arraysize', type = int, default = 100000,
		help='Size of test arrays in number of elements.')

	# Target run time for each individual benchmark.
	parser.add_argument('--runtimetarget', type = float, default = 0.1,
		help='Target length of time to run each benchmark for.')

	return parser.parse_args()
##############################################################################
# Parse the command line once at import time; these settings drive the run.
CmdArgs = GetCmdArguments()
ArraySize = CmdArgs.arraysize
RunTimeTarget = CmdArgs.runtimetarget
##############################################################################
# Run the benchmarks.
# The arrayfunc function being benchmarked, and the array codes it supports.
funcname = 'log'
supportedarrays = ('f', 'd')
# True if platform supports SIMD.
PlatformHasSIMD = arrayfunc.simdsupport.hassimd
# Detect the hardware platform, and assign the correct platform data table to it.
def platformdetect():
	""" Return a string containing the array codes if the machine supports
	SIMD for this function. The results will vary depending upon which platform
	it is running on.
	"""
	# Machine identifiers mapped to the array codes with SIMD acceleration
	# for this function.  For 'log' no platform offers SIMD, so every entry
	# is empty, as is the fallback for unknown machines.
	# i686 = 32 bit x86, this never has SIMD.
	# x86_64 = 64 bit x86, supported on Linux with GCC only.
	# armv7l = 32 bit ARM, for Raspberry Pi 3 with 32 bit Linux.
	# aarch64 = 64 bit ARM, for Raspberry Pi 3 or 4 with 64 bit Linux.
	simdcodes = dict.fromkeys(('i686', 'x86_64', 'armv7l', 'aarch64'), '')
	return simdcodes.get(platform.machine(), '')
# Array codes which get SIMD acceleration on this machine ('' when none).
if PlatformHasSIMD:
	SIMDArrays = platformdetect()
else:
	SIMDArrays = ''
# Uses SIMD on at least one array type.
HasSIMDOption = len(SIMDArrays) > 0
##############################################################################
# True if this benchmark allows math error detection to be turned off.
# We check a copy of the equation from the template in order to check this.
# Note: Need double quotes around the equation because some functions contain
# a string with single quotes, and this would cause a conflict if we used single
# quotes to enclose this.
HasMathErrorOption = 'matherrors' in "arrayfunc.log(datax, dataout, matherrors=True)"
##############################################################################
# Used to collect the results.
# Each dict maps array code -> average seconds per iteration.
PyData = {}
AfData = {}
AfDataErrTrueSimdTrue = {}
AfDataErrFalseSimdTrue = {}
AfDataErrTrueSimdFalse = {}
for arraycode in supportedarrays:
# This array type supports SIMD. Some functions do not support SIMD at all,
# while others support it only for some array types on some platforms.
ArrayHasSIMD = arraycode in SIMDArrays
# Initialise the data arrays.
ArrayData = InitDataArrays(arraycode, ArraySize)
# Initialise the optional data elements that are only used for some tests.
OptionData = InitOptionData(arraycode, ArraySize, funcname)
# Calibrate the test runtime targets.
pyitercounts, afitercounts = calibrateruntime(arraycode, ArraySize, ArrayData, OptionData, RunTimeTarget)
if ArrayHasSIMD:
afitersidmcounts = calibratesimdruntime(arraycode, ArrayData, OptionData, RunTimeTarget)
# Benchmark the Python implementation.
PyData[arraycode] = BenchmarkPython(pyitercounts, arraycode, ArraySize, ArrayData, OptionData)
# Benchmark the Arrayfunc implementation with default parameters.
# This covers user requested minimal tests, plus functions which do not
# have either error checking or SIMD.
AfData[arraycode] = BenchmarkAF(afitercounts, arraycode, ArrayData, OptionData)
# A minimal test only involves the default parameters.
if not CmdArgs.mintest:
# Function has error checking but not SIMD. Test error checking turned off.
# The default case covers with error checking turned on.
if HasMathErrorOption and not ArrayHasSIMD:
AfDataErrTrueSimdTrue[arraycode] = BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
# Function does not have error checking but does have SIMD.
# Test SIMD turned off. The default case covers with SIMD turned on.
if (not HasMathErrorOption) and ArrayHasSIMD:
AfDataErrTrueSimdTrue[arraycode] = BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
# Function has both error checking and SIMD. Check for:
# error checking on and SIMD off,
# error checking off and SIMD off,
# error checking off and SIMD on
if HasMathErrorOption and ArrayHasSIMD:
AfDataErrFalseSimdTrue[arraycode] = BenchmarkAFErrFalseSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
AfDataErrTrueSimdTrue[arraycode] = BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
AfDataErrTrueSimdFalse[arraycode] = BenchmarkAFErrTrueSimdFalse(afitersidmcounts, arraycode, ArrayData, OptionData)
##############################################################################
##############################################################################
# Report the benchmarks.
# The format string used to print out results in stand alone mode.
def sformatter(pos, val):
	"""Return a str.format fragment for one result column.

	None produces 17 blanks.  Otherwise the number of decimal places grows
	as the value shrinks: 2 places below 1.0, 1 place below 10.0, none
	above.  *pos* is the zero-based column position; the emitted format
	field index is pos + 1.
	"""
	if val is None:
		return ' ' * 17
	if val < 1.0:
		precision = '.2f'
	elif val < 10.0:
		precision = '.1f'
	else:
		precision = '.0f'
	return '{%d:>8%s} ' % (pos + 1, precision)
def printline(label1, col2, col3, col4, col5):
	"""Print one formatted report row; None columns come out blank."""
	values = (col2, col3, col4, col5)
	rowformat = '{0:^7}' + ''.join(sformatter(position, value) for position, value in enumerate(values))
	print(rowformat.format(label1, col2, col3, col4, col5))
# Report labels will vary depending on the options available with this function.
# Pick the column headings matching which benchmark variants were run.
if HasMathErrorOption and HasSIMDOption:
	theaderlabels = 'Err on SIMD off Err off SIMD off Err off SIMD on'
elif HasMathErrorOption and (not HasSIMDOption):
	theaderlabels = ' Error check off'
elif (not HasMathErrorOption) and HasSIMDOption:
	theaderlabels = ' SIMD off'
else:
	theaderlabels = ''
theader = """
Function = {0}
======= ================ ================ ================ ================
 Array AF vs Python {1}
======= ================ ================ ================ ================""".format(funcname, theaderlabels)
tfooter = '======= ================ ================ ================ ================'
def calcstats(statscolumn):
	"""Return (average, maximum, minimum) for a column of numbers.

	An empty column yields (None, None, None).
	"""
	if not statscolumn:
		return None, None, None
	return sum(statscolumn) / len(statscolumn), max(statscolumn), min(statscolumn)
########################################################
def outputstandalone():
	"""Output the results for when the benchmark is run in standalone mode.
	This outputs whatever data is present, and so inherently adapts
	itself to functions which have varying test options.
	"""
	# Per-column accumulators for the summary statistics at the bottom.
	totalpyrel = []
	totalmathnosimdrel = []
	totalsimdvsnosimd = []
	totalnoerrwithsimd = []
	print(theader)
	for x in supportedarrays:
		# Default versus native Python.
		pyafrel = PyData[x] / AfData[x]
		totalpyrel.append(pyafrel)
		# Default versus math error checking on and no SIMD.
		# If the function doesn't use SIMD then comparing it with SIMD off
		# is pointless. Also skip for array types which don't use SIMD or
		# for minimal tests.
		if x in AfDataErrFalseSimdTrue:
			mathnosimdrel = AfData[x] / AfDataErrFalseSimdTrue[x]
			totalmathnosimdrel.append(mathnosimdrel)
		else:
			mathnosimdrel = None
		# Default versus no error checking and no SIMD.
		# If the function doesn't use math error checking then comparing it
		# with math error off is pointless. Also skip for minimal tests.
		if x in AfDataErrTrueSimdTrue:
			simdnoerrnosimdrel = AfData[x] / AfDataErrTrueSimdTrue[x]
			totalsimdvsnosimd.append(simdnoerrnosimdrel)
		else:
			simdnoerrnosimdrel = None
		# No data exists if SIMD is not available.
		if x in AfDataErrTrueSimdFalse:
			# Default versus error checking turned off but SIMD enabled.
			noerrwithsimd = AfData[x] / AfDataErrTrueSimdFalse[x]
			totalnoerrwithsimd.append(noerrwithsimd)
		else:
			noerrwithsimd = None
		printline(x, pyafrel, mathnosimdrel, simdnoerrnosimdrel, noerrwithsimd)
	print(tfooter)
	print()
	print(tfooter)
	# Calculate stats.
	# Default versus native Python.
	col2avg, col2max, col2min = calcstats(totalpyrel)
	# Default versus math error checking on and no SIMD.
	col3avg, col3max, col3min = calcstats(totalmathnosimdrel)
	# Default versus no error checking and no SIMD.
	col4avg, col4max, col4min = calcstats(totalsimdvsnosimd)
	# Default versus error checking turned off but SIMD enabled.
	col5avg, col5max, col5min = calcstats(totalnoerrwithsimd)
	printline('avg', col2avg, col3avg, col4avg, col5avg)
	printline('max', col2max, col3max, col4max, col5max)
	printline('min', col2min, col3min, col4min, col5min)
	print(tfooter)
########################################################
# If raw data is requested, output the raw numbers as JSON.
# This will normally be used by a parent process which called this
# benchmark as a child process.
if CmdArgs.rawoutput:
	# Called by another process, return data as json.
	# Keys are array codes, values are average seconds per iteration.
	testresults = {'pydata' : PyData,
		'afdata' : AfData,
		'afdataerrtruesimdtrue' : AfDataErrTrueSimdTrue,
		'afdataerrtruesimdfalse' : AfDataErrTrueSimdFalse,
		'afdataerrfalsesimdtrue' : AfDataErrFalseSimdTrue,
		'benchname' : 'arrayfunc',
		}
	print(json.dumps(testresults))
else:
	# If standalone, print out data in readable format.
	outputstandalone()
##############################################################################
| [
"m12.griffin@gmail.com"
] | m12.griffin@gmail.com |
a09ff0316844e058aee4a997839bbdf28553ea89 | f6a6a8b7c0af49bd86930adde8ffe9ba37950046 | /finitediff/grid/__init__.py | 8a4df96de566e58f26627e8143e6e5b44750045a | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bjodah/finitediff | 5cf65cfff94c85d5aa747c5b0616b93482d707ae | bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8 | refs/heads/master | 2021-08-28T18:03:54.491501 | 2021-08-07T06:32:01 | 2021-08-07T06:39:34 | 14,988,640 | 34 | 5 | BSD-2-Clause | 2020-03-07T23:15:07 | 2013-12-06T17:10:24 | Python | UTF-8 | Python | false | false | 292 | py | from .rebalance import (
rebalanced_grid,
pre_pruning_mask,
combine_grids,
grid_pruning_mask,
)
from .refine import refine_grid
from .make import adapted_grid
from .util import locate_discontinuity, pool_discontinuity_approx, grid_error
from .plotting import plot_convergence
| [
"bjodah@gmail.com"
] | bjodah@gmail.com |
afb125cc33aca6fb089b34aaa288b914f5d698e8 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/surface/components/__init__.py | 66159453ab8e75d21f8d1020cb3956a276b8b50c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 3,929 | py | # -*- coding: utf-8 -*- #
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The super-group for the update manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
from googlecloudsdk.calliope import base
from googlecloudsdk.core import config
from googlecloudsdk.core import log
# Registered on the GA release track only.
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Components(base.Group):
  """List, install, update, or remove Google Cloud SDK components.

  The {command} command group lets you control which tools are installed
  in the Cloud SDK. It can be used to install, update and remove components of
  the Cloud SDK, ensuring a lean, up-to-date installation.

  {command} regularly checks whether updates are available for the
  tools you already have installed, and gives you the opportunity to upgrade to
  the latest version.

  Certain components have dependencies. {command} will install any dependencies,
  and during removal, any dependant components will be uninstalled
  automatically.

  ## EXAMPLES

  To see all available components:

    $ {command} list

  To install a component you don't have:

    $ {command} install COMPONENT

  To remove a component you no longer need:

    $ {command} remove COMPONENT

  To update all components you have to their latest version:

    $ {command} update

  To update all installed components to version 1.2.3:

    $ {command} update --version 1.2.3
  """

  # Places this group in the "SDK tools" section of `gcloud help`.
  category = base.GCLOUD_SDK_TOOLS_CATEGORY

  @staticmethod
  def Args(parser):
    """Sets args for gcloud components."""
    # All four flags below are hidden maintenance/testing overrides.
    # An override for the location to install components into.
    parser.add_argument('--sdk-root-override', required=False, hidden=True,
                        help='THIS ARGUMENT NEEDS HELP TEXT.')
    # A different URL to look at instead of the default.
    parser.add_argument('--snapshot-url-override', required=False, hidden=True,
                        help='THIS ARGUMENT NEEDS HELP TEXT.')
    # This is not a commonly used option. You can use this flag to create a
    # Cloud SDK install for an OS other than the one you are running on.
    # Running the updater multiple times for different operating systems could
    # result in an inconsistent install.
    parser.add_argument('--operating-system-override', required=False,
                        hidden=True,
                        help='THIS ARGUMENT NEEDS HELP TEXT.')
    # This is not a commonly used option. You can use this flag to create a
    # Cloud SDK install for a processor architecture other than that of your
    # current machine. Running the updater multiple times for different
    # architectures could result in an inconsistent install.
    parser.add_argument('--architecture-override', required=False, hidden=True,
                        help='THIS ARGUMENT NEEDS HELP TEXT.')

  # pylint:disable=g-missing-docstring
  def Filter(self, unused_tool_context, args):
    # Disable user-project quota attribution for this command group.
    base.DisableUserProjectQuota()
    # Warn when the install is pinned to a non-default release channel and
    # surface the snapshot URL actually in use.
    if config.INSTALLATION_CONFIG.IsAlternateReleaseChannel():
      log.warning('You are using alternate release channel: [%s]',
                  config.INSTALLATION_CONFIG.release_channel)
      # Always show the URL if using a non standard release channel.
      log.warning('Snapshot URL for this release channel is: [%s]',
                  config.INSTALLATION_CONFIG.snapshot_url)
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
2cf6fc96c940437aff1a27460dddfc56013ab7ec | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02889/s447377526.py | 0de588993a718a3aa4684f5ab46849141c2ffbf5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | from scipy.sparse.csgraph import floyd_warshall
# AtCoder ABC 143 E: minimum number of fuel tanks between town pairs.
# Read town/road counts and the per-tank travel range.
n, m, tank_range = map(int, input().split())

# Dense adjacency matrix; 0 means "no road" for scipy's dense csgraph input.
dist = [[0] * n for _ in range(n)]
for _ in range(m):
    a, b, c = map(int, input().split())
    dist[a - 1][b - 1] = c
    dist[b - 1][a - 1] = c

q = int(input())
pairs = [tuple(map(int, input().split())) for _ in range(q)]

# All-pairs shortest road distances.
dist = floyd_warshall(dist)

# Collapse into a "refuel graph": edge weight 1 iff reachable on one tank.
hops = [[1 if dist[i][j] <= tank_range else 0 for j in range(n)]
        for i in range(n)]

# Shortest paths in the refuel graph = minimum number of tanks used.
hops = floyd_warshall(hops)

for s, t in pairs:
    # A path of k one-tank hops needs k tanks; starting tank is free, so -1.
    tanks = hops[s - 1][t - 1] - 1
    print(int(tanks) if tanks != float('inf') else -1)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ed84a635a4f264bedf8b43ef13bfdacf1f117a89 | 18783303fec404a4386c8ed0a8b8737c1424eadd | /Py exercises/IffyKeysValues_inalist.py | 2f4f2f342c53dca71a059ea0222cdfc45a995d94 | [
"MIT"
] | permissive | arvindkarir/python-pandas-code | a5b23b724c387d5ff745e017f968681847c506ad | fb3b68f07f0438cd0ef6d7ad669ce78650d884a8 | refs/heads/master | 2020-08-13T02:05:16.469398 | 2019-10-23T02:18:10 | 2019-10-23T02:18:10 | 214,887,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | # it generates a dictionary with keys and values, still need to figure out
import random #a standard library module
def someList(numBuckets):
buckets = []
dictKey = 0
dictVal = 0
for i in range(numBuckets):
buckets.append([])
#print(buckets)
for i in range(20):
dictKey = random.randint(0, 10*2)
dictVal = i
#print(dictKey, dictVal)
hashBucket = buckets[dictKey%numBuckets]
for i in range(len(hashBucket)):
if hashBucket[i][0] == dictKey:
hashBucket[i] =(dictKey, dictVal)
return
hashBucket.append((dictKey, dictVal))
print(hashBucket)
D = someList(7)  # 7 buckets for 20 keys drawn from 0-20, so collisions are expected
| [
"arvindkarir@gmail.com"
] | arvindkarir@gmail.com |
39c5052b25efb02d90dc7c7c625e9a4d98df6a2e | c2c8915d745411a0268ee5ce18d8bf7532a09e1a | /cybox-2.1.0.5/cybox/bindings/address_object.py | 4c3d1f4cedd38a57ab1d40aa8857fbf33a139b58 | [
"BSD-3-Clause"
] | permissive | asealey/crits_dependencies | 581d44e77f297af7edb78d08f0bf11ad6712b3ab | a8049c214c4570188f6101cedbacf669168f5e52 | refs/heads/master | 2021-01-17T11:50:10.020346 | 2014-12-28T06:53:01 | 2014-12-28T06:53:01 | 28,555,464 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,008 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
#
# Generated Tue Apr 09 11:09:54 2013 by generateDS.py version 2.9a.
#
import sys
import getopt
import re as re_
import cybox_common
import base64
from datetime import datetime, tzinfo, timedelta
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document using the ElementTree flavor chosen at import.

    When lxml is the active backend and the caller supplied no parser,
    default to ETCompatXMLParser (ignores comments, huge_tree enabled) so
    the resulting tree behaves like stdlib ElementTree.
    """
    using_lxml = XMLParser_import_library == XMLParser_import_lxml
    if using_lxml and 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser(huge_tree=True)
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(tzinfo):
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if isinstance(input_data, basestring):
return input_data
if input_data.microsecond == 0:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S')
else:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S.%f')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_datetime(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
return dt.replace(tzinfo = tz)
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
if isinstance(input_data, basestring):
return input_data
_svalue = input_data.strftime('%Y-%m-%d')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_date(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
return datetime.strptime(input_data,
'%Y-%m-%d').replace(tzinfo = tz)
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'  # encoding used when quote_xml/quote_attrib byte-encode output
Tag_pattern_ = re_.compile(r'({.*})?(.*)')  # splits '{namespace}tag' into (namespace, tag)
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")  # collapses runs of whitespace
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')  # captures namespace URI and local name
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
    """Write four spaces per indentation level when pretty-printing."""
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape &, < and > for XML text content; return '' for falsy input.

    Non-string values are stringified first; the result is byte-encoded
    with the module-wide ExternalEncoding (Python 2 str/unicode semantics).
    """
    if not inStr:
        return ''
    text = inStr if isinstance(inStr, basestring) else '%s' % inStr
    # Ampersand must be escaped first so entities are not double-escaped.
    for raw, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        text = text.replace(raw, entity)
    return unicode(text).encode(ExternalEncoding)
def quote_attrib(inStr):
    """Escape a value for an XML attribute and wrap it in quotes.

    Prefers double quotes; falls back to single quotes when the value
    contains a double quote, and to &quot; escaping when it contains both.
    """
    text = inStr if (isinstance(inStr, basestring) and inStr) else '%s' % inStr
    # Ampersand must be escaped first so entities are not double-escaped.
    for raw, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        text = text.replace(raw, entity)
    if '"' not in text:
        quoted = '"%s"' % text
    elif "'" not in text:
        quoted = "'%s'" % text
    else:
        quoted = '"%s"' % text.replace('"', '&quot;')
    return unicode(quoted).encode(ExternalEncoding)
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
    """Concatenate a node's own text with the tail text of each child."""
    pieces = [node.text if node.text is not None else '']
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up an attribute, resolving a 'prefix:name' via node.nsmap (lxml).

    Returns None for unknown attributes, unknown prefixes, or names with
    more than one colon.
    """
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, local = parts
        # nsmap is lxml-specific; prefix must be declared on this node.
        namespace = node.nsmap.get(prefix)
        if namespace is not None:
            return attrs.get('{%s}%s' % (namespace, local))
    return None
class GDSParseError(Exception):
    """Raised when a document cannot be mapped onto the generated bindings."""
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, appending its tag (and line under lxml)."""
    if XMLParser_import_library == XMLParser_import_lxml:
        # lxml elements expose the source line number.
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline)
    else:
        detail = '%s (element %s)' % (msg, node.tag)
    raise GDSParseError(detail)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' %
(self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
    """Describes one member of a generated class: name, XSD type, container flag."""

    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_data_type(self, data_type):
        self.data_type = data_type

    def get_data_type_chain(self):
        """Return the raw type spec, which may be a list of chained types."""
        return self.data_type

    def get_data_type(self):
        """Return the effective type: the last link of a chain, or the spec itself."""
        chain = self.data_type
        if not isinstance(chain, list):
            return chain
        return chain[-1] if chain else 'xs:string'

    def set_container(self, container):
        self.container = container

    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class AddressObjectType(cybox_common.ObjectPropertiesType):
    """The AddressObjectType is intended to characterize cyber
    addresses.The category field specifies the address category that
    is being defined. The is_source field specifies if this is a
    "Source" addressThe is_destination field specifies if this is a
    "Destination" address"""
    subclass = None
    superclass = cybox_common.ObjectPropertiesType
    def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, category='ipv4-addr', is_source=None, is_destination=None, is_spoofed=None, Address_Value=None, VLAN_Name=None, VLAN_Num=None):
        super(AddressObjectType, self).__init__(object_reference, Custom_Properties, xsi_type )
        # XML attributes (scalar flags on the Address element).
        self.category = _cast(None, category)
        self.is_source = _cast(bool, is_source)
        self.is_destination = _cast(bool, is_destination)
        self.is_spoofed = _cast(bool, is_spoofed)
        # Child elements.
        self.Address_Value = Address_Value
        self.VLAN_Name = VLAN_Name
        self.VLAN_Num = VLAN_Num
    def factory(*args_, **kwargs_):
        # Honor a registered subclass, if one was installed via `subclass`.
        if AddressObjectType.subclass:
            return AddressObjectType.subclass(*args_, **kwargs_)
        else:
            return AddressObjectType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Address_Value(self): return self.Address_Value
    def set_Address_Value(self, Address_Value): self.Address_Value = Address_Value
    def validate_StringObjectPropertyType(self, value):
        # Validate type cybox_common.StringObjectPropertyType, a restriction on None.
        pass
    def get_VLAN_Name(self): return self.VLAN_Name
    def set_VLAN_Name(self, VLAN_Name): self.VLAN_Name = VLAN_Name
    def get_VLAN_Num(self): return self.VLAN_Num
    def set_VLAN_Num(self, VLAN_Num): self.VLAN_Num = VLAN_Num
    def validate_IntegerObjectPropertyType(self, value):
        # Validate type cybox_common.IntegerObjectPropertyType, a restriction on None.
        pass
    def get_category(self): return self.category
    def set_category(self, category): self.category = category
    def get_is_source(self): return self.is_source
    def set_is_source(self, is_source): self.is_source = is_source
    def get_is_destination(self): return self.is_destination
    def set_is_destination(self, is_destination): self.is_destination = is_destination
    # BUG FIX: previously returned self.spoofed, an attribute that is never
    # set anywhere in this class, so this getter always raised AttributeError.
    def get_is_spoofed(self): return self.is_spoofed
    def set_is_spoofed(self, is_spoofed): self.is_spoofed = is_spoofed
    def hasContent_(self):
        # True when any child element (or inherited content) is present.
        if (
            self.Address_Value is not None or
            self.VLAN_Name is not None or
            self.VLAN_Num is not None or
            super(AddressObjectType, self).hasContent_()
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='AddressObj:', name_='AddressObjectType', namespacedef_='', pretty_print=True):
        """Write this object as XML to *outfile* at the given indent level."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AddressObjectType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='AddressObj:', name_='AddressObjectType'):
        super(AddressObjectType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AddressObjectType')
        if self.category is not None and 'category' not in already_processed:
            already_processed.add('category')
            outfile.write(' category=%s' % (quote_attrib(self.category), ))
        if self.is_source is not None and 'is_source' not in already_processed:
            already_processed.add('is_source')
            outfile.write(' is_source="%s"' % self.gds_format_boolean(self.is_source, input_name='is_source'))
        if self.is_destination is not None and 'is_destination' not in already_processed:
            already_processed.add('is_destination')
            outfile.write(' is_destination="%s"' % self.gds_format_boolean(self.is_destination, input_name='is_destination'))
        if self.is_spoofed is not None and 'is_spoofed' not in already_processed:
            already_processed.add('is_spoofed')
            outfile.write(' is_spoofed="%s"' % self.gds_format_boolean(self.is_spoofed, input_name='is_spoofed'))
    def exportChildren(self, outfile, level, namespace_='AddressObj:', name_='AddressObjectType', fromsubclass_=False, pretty_print=True):
        super(AddressObjectType, self).exportChildren(outfile, level, 'AddressObj:', name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Address_Value is not None:
            self.Address_Value.export(outfile, level, 'AddressObj:', name_='Address_Value', pretty_print=pretty_print)
        if self.VLAN_Name is not None:
            self.VLAN_Name.export(outfile, level, 'AddressObj:', name_='VLAN_Name', pretty_print=pretty_print)
        if self.VLAN_Num is not None:
            self.VLAN_Num.export(outfile, level, 'AddressObj:', name_='VLAN_Num', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an etree *node*."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('category', node)
        if value is not None and 'category' not in already_processed:
            already_processed.add('category')
            self.category = value
        value = find_attr_value_('is_source', node)
        if value is not None and 'is_source' not in already_processed:
            already_processed.add('is_source')
            if value in ('true', '1'):
                self.is_source = True
            elif value in ('false', '0'):
                self.is_source = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('is_destination', node)
        if value is not None and 'is_destination' not in already_processed:
            already_processed.add('is_destination')
            if value in ('true', '1'):
                self.is_destination = True
            elif value in ('false', '0'):
                self.is_destination = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        # BUG FIX: the generated code was missing this lookup, so the
        # is_spoofed branch below re-tested the stale is_destination value
        # and the is_spoofed XML attribute was never actually read.
        value = find_attr_value_('is_spoofed', node)
        if value is not None and 'is_spoofed' not in already_processed:
            already_processed.add('is_spoofed')
            if value in ('true', '1'):
                self.is_spoofed = True
            elif value in ('false', '0'):
                self.is_spoofed = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        super(AddressObjectType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Address_Value':
            obj_ = cybox_common.StringObjectPropertyType.factory()
            obj_.build(child_)
            self.set_Address_Value(obj_)
        elif nodeName_ == 'VLAN_Name':
            obj_ = cybox_common.StringObjectPropertyType.factory()
            obj_.build(child_)
            self.set_VLAN_Name(obj_)
        elif nodeName_ == 'VLAN_Num':
            obj_ = cybox_common.IntegerObjectPropertyType.factory()
            obj_.build(child_)
            self.set_VLAN_Num(obj_)
        super(AddressObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class AddressObjectType
GDSClassesMapping = {
'Build_Utility': cybox_common.BuildUtilityType,
'Errors': cybox_common.ErrorsType,
'Time': cybox_common.TimeType,
'Certificate_Issuer': cybox_common.StringObjectPropertyType,
'Metadata': cybox_common.MetadataType,
'Hash': cybox_common.HashType,
'Information_Source_Type': cybox_common.ControlledVocabularyStringType,
'Block_Hash_Value': cybox_common.HashValueType,
'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType,
'SubDatum': cybox_common.MetadataType,
'Segment_Hash': cybox_common.HashValueType,
'Digital_Signature': cybox_common.DigitalSignatureInfoType,
'Code_Snippets': cybox_common.CodeSnippetsType,
'Value': cybox_common.StringObjectPropertyType,
'Length': cybox_common.IntegerObjectPropertyType,
'Encoding': cybox_common.ControlledVocabularyStringType,
'Internationalization_Settings': cybox_common.InternationalizationSettingsType,
'Tool_Configuration': cybox_common.ToolConfigurationType,
'English_Translation': cybox_common.StringObjectPropertyType,
'Functions': cybox_common.FunctionsType,
'String_Value': cybox_common.StringObjectPropertyType,
'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType,
'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType,
'System': cybox_common.ObjectPropertiesType,
'Platform': cybox_common.PlatformSpecificationType,
'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType,
'Type': cybox_common.ControlledVocabularyStringType,
'Compilers': cybox_common.CompilersType,
'Tool_Type': cybox_common.ControlledVocabularyStringType,
'String': cybox_common.ExtractedStringType,
'Tool': cybox_common.ToolInformationType,
'Build_Information': cybox_common.BuildInformationType,
'Tool_Hashes': cybox_common.HashListType,
'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType,
'Error_Instances': cybox_common.ErrorInstancesType,
'Data_Segment': cybox_common.StringObjectPropertyType,
'Certificate_Subject': cybox_common.StringObjectPropertyType,
'Language': cybox_common.StringObjectPropertyType,
'Identifier': cybox_common.PlatformIdentifierType,
'Strings': cybox_common.ExtractedStringsType,
'File_System_Offset': cybox_common.IntegerObjectPropertyType,
'Reference_Description': cybox_common.StructuredTextType,
'Code_Snippet': cybox_common.ObjectPropertiesType,
'Configuration_Settings': cybox_common.ConfigurationSettingsType,
'Simple_Hash_Value': cybox_common.SimpleHashValueType,
'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType,
'Instance': cybox_common.ObjectPropertiesType,
'Import': cybox_common.StringObjectPropertyType,
'Property': cybox_common.PropertyType,
'Tool_Specific_Data': cybox_common.ToolSpecificDataType,
'Execution_Environment': cybox_common.ExecutionEnvironmentType,
'Dependencies': cybox_common.DependenciesType,
'Offset': cybox_common.IntegerObjectPropertyType,
'Date': cybox_common.DateRangeType,
'Hashes': cybox_common.HashListType,
'Segments': cybox_common.HashSegmentsType,
'Segment_Count': cybox_common.IntegerObjectPropertyType,
'Usage_Context_Assumption': cybox_common.StructuredTextType,
'Block_Hash': cybox_common.FuzzyHashBlockType,
'Dependency': cybox_common.DependencyType,
'Error': cybox_common.ErrorType,
'Trigger_Point': cybox_common.HexBinaryObjectPropertyType,
'Environment_Variable': cybox_common.EnvironmentVariableType,
'Byte_Run': cybox_common.ByteRunType,
'Contributors': cybox_common.PersonnelType,
'Image_Offset': cybox_common.IntegerObjectPropertyType,
'Imports': cybox_common.ImportsType,
'Library': cybox_common.LibraryType,
'References': cybox_common.ToolReferencesType,
'Internal_Strings': cybox_common.InternalStringsType,
'Custom_Properties': cybox_common.CustomPropertiesType,
'Configuration_Setting': cybox_common.ConfigurationSettingType,
'Libraries': cybox_common.LibrariesType,
'Function': cybox_common.StringObjectPropertyType,
'Description': cybox_common.StructuredTextType,
'User_Account_Info': cybox_common.ObjectPropertiesType,
'Build_Configuration': cybox_common.BuildConfigurationType,
'VLAN_Name': cybox_common.StringObjectPropertyType,
'Address': cybox_common.HexBinaryObjectPropertyType,
'Search_Within': cybox_common.IntegerObjectPropertyType,
'Segment': cybox_common.HashSegmentType,
'Compiler': cybox_common.CompilerType,
'Name': cybox_common.StringObjectPropertyType,
'Address_Value': cybox_common.StringObjectPropertyType,
'VLAN_Num': cybox_common.IntegerObjectPropertyType,
'Signature_Description': cybox_common.StringObjectPropertyType,
'Block_Size': cybox_common.IntegerObjectPropertyType,
'Search_Distance': cybox_common.IntegerObjectPropertyType,
'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType,
'Dependency_Description': cybox_common.StructuredTextType,
'Contributor': cybox_common.ContributorType,
'Tools': cybox_common.ToolsInformationType,
'Data_Size': cybox_common.DataSizeType,
}
# Usage banner printed by usage() when the command line is malformed.
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print the command-line usage text and exit with status 1."""
    # PORTABILITY FIX: the original used the Python-2-only print statement
    # (`print USAGE_TEXT`). With a single argument the function form behaves
    # identically on Python 2 and also works on Python 3.
    print(USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Return (local tag name, generated binding class) for an etree node.

    The class is looked up first in GDSClassesMapping and then, as a
    fallback, among this module's globals; None when neither has it.
    """
    local_tag = Tag_pattern_.match(node.tag).groups()[-1]
    resolved_class = GDSClassesMapping.get(local_tag)
    if resolved_class is None:
        resolved_class = globals().get(local_tag)
    return local_tag, resolved_class
def parse(inFileName):
    """Parse the given XML file into its generated binding object.

    Falls back to the AddressObjectType binding when the root element is
    not recognized. Returns the built binding object.
    """
    tree = parsexml_(inFileName)
    root_node = tree.getroot()
    tag, cls = get_root_tag(root_node)
    if cls is None:
        # Unrecognized root element: assume the default Address binding.
        tag = 'Address'
        cls = AddressObjectType
    root_obj = cls.factory()
    root_obj.build(root_node)
    # Drop the reference to the parsed tree so the DOM memory can be
    # reclaimed by the garbage collector.
    tree = None
    return root_obj
def parseEtree(inFileName):
    """Parse an XML file, echo it to stdout as pretty-printed XML, and
    return the (binding object, etree element) pair."""
    tree = parsexml_(inFileName)
    root_node = tree.getroot()
    tag, cls = get_root_tag(root_node)
    if cls is None:
        # Unrecognized root element: assume the default Address binding.
        tag = 'Address'
        cls = AddressObjectType
    root_obj = cls.factory()
    root_obj.build(root_node)
    # Release the parsed tree so the DOM memory can be reclaimed.
    tree = None
    root_element = root_obj.to_etree(None, name_=tag)
    serialized = etree_.tostring(
        root_element, pretty_print=True, xml_declaration=True,
        encoding="utf-8")
    sys.stdout.write(serialized)
    sys.stdout.write('\n')
    return root_obj, root_element
def parseString(inString):
    """Parse XML held in a string into its generated binding object.

    Falls back to the AddressObjectType binding when the root element is
    not recognized. Returns the built binding object.
    """
    # PORTABILITY FIX: the StringIO module exists only on Python 2; fall
    # back to io.StringIO so this helper also runs under Python 3.
    try:
        from StringIO import StringIO
    except ImportError:
        from io import StringIO
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Address'
        rootClass = AddressObjectType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Release the parsed tree so the DOM memory can be reclaimed.
    doc = None
    return rootObj
def main():
    """Entry point: parse the single XML file named on the command line,
    or print usage and exit when the argument count is wrong."""
    cli_args = sys.argv[1:]
    if len(cli_args) == 1:
        parse(cli_args[0])
    else:
        usage()
# Script entry point: run the parser on the file named on the command line.
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
# Public API of this generated binding module.
__all__ = [
    "AddressObjectType"
]
| [
"ssnow@mitre.org"
] | ssnow@mitre.org |
25c778bb93b1774fe789a08191b367d0cdeceb2b | abacbf9798f089cd43fd50c2d577de50cca806d8 | /venv/Lib/site-packages/example/webalone/migrations/versions/060a77af98e5_initial.py | 051775fdf12e92177407e2b75bdcd612686b8d77 | [] | no_license | Sarveshr49/ProInternSML | f2bfd82905dd185d82830d4758d69ee2b23f71fb | 2ac09e31ebe54dbecd46935818b089a4b8428354 | refs/heads/master | 2023-08-11T17:36:16.387236 | 2021-10-16T18:23:04 | 2021-10-16T18:23:04 | 373,503,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,466 | py | """initial
Revision ID: 060a77af98e5
Revises:
Create Date: 2016-04-20 08:54:54.125614
"""
# revision identifiers, used by Alembic.
revision = '060a77af98e5'
down_revision = None  # root migration: no parent revision
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
import odm
import odm.types
def upgrade(engine_name):
    """Dispatch to the ``upgrade_<engine_name>()`` routine in this module."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the ``downgrade_<engine_name>()`` routine in this module."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_default():
    """Create the initial schema on the default engine.

    Tables: group, permission, user, the groups<->permissions and
    users<->groups association tables, mailinglist, registration and token.
    Referenced tables are created before the tables holding foreign keys
    to them.
    """
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): the duplicated UniqueConstraint entries below come from
    # the autogeneration step; they look harmless but could be de-duplicated.
    op.create_table('group',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=80), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name'),
    sa.UniqueConstraint('name')
    )
    op.create_table('permission',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=120), nullable=True),
    sa.Column('description', sa.String(length=120), nullable=True),
    sa.Column('policy', odm.types.json.JSONType(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('description'),
    sa.UniqueConstraint('description'),
    sa.UniqueConstraint('name'),
    sa.UniqueConstraint('name')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=50), nullable=True),
    sa.Column('first_name', sa.String(length=30), nullable=True),
    sa.Column('last_name', sa.String(length=30), nullable=True),
    sa.Column('email', sa.String(length=120), nullable=True),
    sa.Column('password', sa.String(length=120), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.Column('superuser', sa.Boolean(), nullable=True),
    sa.Column('joined', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('username'),
    sa.UniqueConstraint('username')
    )
    # group <-> permission association table.
    op.create_table('groups_permissions',
    sa.Column('group_id', sa.Integer(), nullable=True),
    sa.Column('permission_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
    sa.ForeignKeyConstraint(['permission_id'], ['permission.id'], )
    )
    op.create_table('mailinglist',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=120), nullable=True),
    sa.Column('topic', sa.String(length=60), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('email')
    )
    op.create_table('registration',
    sa.Column('id', sa.String(length=40), nullable=False),
    sa.Column('expiry', sa.DateTime(), nullable=False),
    sa.Column('confirmed', sa.Boolean(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('token',
    sa.Column('id', odm.types.uuid.UUIDType(length=16), nullable=False),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('expiry', sa.DateTime(), nullable=True),
    sa.Column('ip_address', odm.types.ip_address.IPAddressType(length=50), nullable=True),
    sa.Column('user_agent', sa.String(length=80), nullable=True),
    sa.Column('last_access', sa.DateTime(), nullable=True),
    sa.Column('session', sa.Boolean(), nullable=True),
    sa.Column('description', sa.String(length=256), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # user <-> group association table.
    op.create_table('users_groups',
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('group_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
    )
    ### end Alembic commands ###
def downgrade_default():
    """Drop every table created by upgrade_default(), dependents first so
    foreign-key constraints never dangle."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('users_groups')
    op.drop_table('token')
    op.drop_table('registration')
    op.drop_table('mailinglist')
    op.drop_table('groups_permissions')
    op.drop_table('user')
    op.drop_table('permission')
    op.drop_table('group')
    ### end Alembic commands ###
| [
"sarveshragade@gmail.com"
] | sarveshragade@gmail.com |
f9d339753695d27f8f5ee038d683f7d5953ca007 | 8c02ce5c53c99d2036685d81e60a6506ce87347c | /tests.py | a5c57797d55aaba8fe954f1832ea405e233a548e | [
"MIT"
] | permissive | tonyseek/flask-misaka | fcb180e4fc44cbd600f5eaecea86119f81bc5ff9 | 9941a5ed770325c884fced429efc6bbc494f7261 | refs/heads/master | 2020-12-03T05:21:16.213025 | 2013-08-29T04:19:48 | 2013-08-29T04:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,305 | py | from flask import Flask, render_template_string, Markup
from unittest import TestCase
import mock
import misaka
from misaka import (EXT_AUTOLINK, EXT_FENCED_CODE,
EXT_LAX_HTML_BLOCKS, EXT_NO_INTRA_EMPHASIS, EXT_SPACE_HEADERS,
EXT_STRIKETHROUGH, EXT_SUPERSCRIPT, EXT_TABLES, HTML_ESCAPE,
HTML_EXPAND_TABS, HTML_HARD_WRAP, HTML_SAFELINK, HTML_SKIP_HTML,
HTML_SKIP_IMAGES, HTML_SKIP_LINKS, HTML_SKIP_STYLE, HTML_SMARTYPANTS,
HTML_TOC, HTML_TOC_TREE, HTML_USE_XHTML, TABLE_ALIGNMASK, TABLE_ALIGN_C,
TABLE_ALIGN_L, TABLE_ALIGN_R, TABLE_HEADER)
from flask.ext.misaka import Misaka, markdown
# Markdown sample exercising several optional extensions: strikethrough,
# inline code, superscript, an autolinkable URL, intra-word emphasis and a
# smartypants apostrophe.
TEST_MD = "*This* ~~contains~~ ``some`` mark^(down) extensions: www.markdown.com foo_bar_baz it's"
app = Flask(__name__)
app.debug = True
Misaka(app)  # registers the |markdown template filter with default options
### templating tests ###
@app.route('/a')
def view_render_inline():
    """Render a string through the inline ``|markdown`` filter."""
    text = u"This is ~~restructuredtext~~ *markdown*"
    return render_template_string('{{s|markdown}}', s=text)
def test_render_inline():
    """The default filter renders emphasis but not strikethrough."""
    response = app.test_client().open('/a')
    assert response.data == u'<p>This is ~~restructuredtext~~ <em>markdown</em></p>\n'
@app.route('/b')
def view_render_var_block():
    """Render a template variable inside a ``{% filter markdown %}`` block."""
    text = u"This is a *markdown* block"
    template = u'''{% filter markdown %}{{s}}{% endfilter %}'''
    return render_template_string(template, s=text)
def test_render_var_block():
    """A variable substituted inside a markdown filter block is rendered."""
    response = app.test_client().open('/b')
    assert response.data == u'<p>This is a <em>markdown</em> block</p>\n'
@app.route('/c')
def view_render_in_block():
    """Render literal template text wrapped in a markdown filter block."""
    template = u'''{% filter markdown %}This is a *markdown* block{% endfilter %}'''
    return render_template_string(template)
def test_render_in_block():
    """Literal text inside a markdown filter block is rendered."""
    response = app.test_client().open('/c')
    assert response.data == u'<p>This is a <em>markdown</em> block</p>\n'
### markdown extensions in templates
extapp = Flask(__name__)
extapp.debug = True
# Second app whose filter has the strikethrough extension enabled.
Misaka(extapp, strikethrough=True)
@extapp.route('/d')
def view_render_inline_ext():
    """Render through the filter of the strikethrough-enabled app."""
    text = u"This is ~~restructuredtext~~ *markdown*"
    return render_template_string('{{s|markdown}}', s=text)
def test_render_inline_ext():
    """With the extension enabled, ~~...~~ renders as <del>."""
    response = extapp.test_client().open('/d')
    assert response.data == u'<p>This is <del>restructuredtext</del> <em>markdown</em></p>\n'
# Note that the Markdown extension tests aren't actually testing that the
# Markdown is rendered correctly; that should be covered by the test suite of
# the misaka module. These tests should test that Flask-Misaka is calling
# the misaka module correctly, and returning the result unmodified
# (aside from being wrapped in a Markup class instance.)
@mock.patch("flask.ext.misaka.misaka.html", side_effect=misaka.html)
class MarkdownExtensionTests(TestCase):
    """Verify that markdown()/Misaka translate keyword options into the
    misaka extension and render-flag bitmasks.

    The class decorator injects ``html`` into every test method: a mock
    wrapping the real ``misaka.html`` (via ``side_effect``), so the call
    arguments can be asserted while rendering still behaves normally.
    """
    def test_defaults(self, html):
        """No options: both bitmasks are zero."""
        ext, flags = 0, 0
        result = markdown(TEST_MD)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_one_ext(self, html):
        """A single extension option maps to its EXT_* bit."""
        ext, flags = EXT_AUTOLINK, 0
        result = markdown(TEST_MD, autolink=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_two_ext(self, html):
        """Two extension options are OR-ed together."""
        ext, flags = EXT_FENCED_CODE | EXT_LAX_HTML_BLOCKS, 0
        result = markdown(TEST_MD, fenced_code=True, lax_html=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_one_render(self, html):
        """A single render option maps to its HTML_* bit."""
        ext, flags = 0, HTML_ESCAPE
        result = markdown(TEST_MD, escape=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_two_render(self, html):
        """Two render options are OR-ed together."""
        ext, flags = 0, HTML_HARD_WRAP | HTML_SAFELINK
        result = markdown(TEST_MD, wrap=True, safelink=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_one_ext_one_render(self, html):
        """Extension and render options land in their respective masks."""
        ext, flags = EXT_NO_INTRA_EMPHASIS, HTML_SKIP_HTML
        result = markdown(TEST_MD, no_intra_emphasis=True, no_html=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_two_ext_two_render(self, html):
        """Multiple options of both kinds combine independently."""
        ext = EXT_STRIKETHROUGH | EXT_SUPERSCRIPT
        flags = HTML_SKIP_LINKS | HTML_SKIP_STYLE
        result = markdown(TEST_MD, strikethrough=True, superscript=True,
            skip_links=True, no_style=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_inverse(self, html):
        """A False value on an inverse-named option sets the EXT_NO_* bit."""
        ext, flags = EXT_NO_INTRA_EMPHASIS, 0
        result = markdown(TEST_MD, intra_emphasis=False)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_undefined_option(self, html):
        """Unknown option names are silently ignored."""
        ext, flags = 0, 0
        result = markdown(TEST_MD, fireworks=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_defined_and_undefined_options(self, html):
        """Known options still apply when mixed with unknown ones."""
        ext, flags = 0, HTML_SMARTYPANTS
        result = markdown(TEST_MD, smartypants=True, stupidpants=False)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_set_defaults(self, html):
        """Options given to the Misaka() constructor become defaults."""
        ext, flags = EXT_TABLES, HTML_SMARTYPANTS
        md = Misaka(smartypants=True, tables=True)
        result = md.render(TEST_MD)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
    def test_override_defaults(self, html):
        """Per-call options override constructor defaults."""
        ext, flags = 0, 0
        md = Misaka(autolink=True)
        result = md.render(TEST_MD, autolink=False)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))
| [
"david@davidbaumgold.com"
] | david@davidbaumgold.com |
0cddd922068bf6e2bc543a5e5fd88b7d5f3d7c4a | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqptcapacity/vlanxlateentries15min.py | 01943d9b890521e7efcbea5a8e5825e5c9e8ac29 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 11,622 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class VlanXlateEntries15min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.VlanXlateEntries15min", "Vlan Xlate entries")
counter = CounterMeta("normalized", CounterCategory.GAUGE, "percentage", "Vlan Xlate Entries Usage")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "normalizedLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "normalizedMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "normalizedMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "normalizedAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "normalizedSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "normalizedTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "normalizedThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "normalizedTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "normalizedTr"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityVlanXlateEntries15min"
meta.rnFormat = "CDeqptcapacityVlanXlateEntries15min"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Vlan Xlate entries stats in 15 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.eqptcapacity.VlanXlateEntries")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.stats.Item")
meta.rnPrefixes = [
('CDeqptcapacityVlanXlateEntries15min', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "normalizedAvg", "normalizedAvg", 20945, PropCategory.IMPLICIT_AVG)
prop.label = "Vlan Xlate Entries Usage average value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedAvg", prop)
prop = PropMeta("str", "normalizedLast", "normalizedLast", 20942, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Vlan Xlate Entries Usage current value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedLast", prop)
prop = PropMeta("str", "normalizedMax", "normalizedMax", 20944, PropCategory.IMPLICIT_MAX)
prop.label = "Vlan Xlate Entries Usage maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedMax", prop)
prop = PropMeta("str", "normalizedMin", "normalizedMin", 20943, PropCategory.IMPLICIT_MIN)
prop.label = "Vlan Xlate Entries Usage minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedMin", prop)
prop = PropMeta("str", "normalizedSpct", "normalizedSpct", 20946, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Vlan Xlate Entries Usage suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedSpct", prop)
prop = PropMeta("str", "normalizedThr", "normalizedThr", 20948, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Vlan Xlate Entries Usage thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("normalizedThr", prop)
prop = PropMeta("str", "normalizedTr", "normalizedTr", 20950, PropCategory.IMPLICIT_TREND)
prop.label = "Vlan Xlate Entries Usage trend"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedTr", prop)
prop = PropMeta("str", "normalizedTrBase", "normalizedTrBase", 20949, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Vlan Xlate Entries Usage trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedTrBase", prop)
prop = PropMeta("str", "normalizedTtl", "normalizedTtl", 20947, PropCategory.IMPLICIT_TOTAL)
prop.label = "Vlan Xlate Entries Usage total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("normalizedTtl", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Construct the stats MO under the given parent.

        This class has no naming properties, so the naming-value list
        passed to the Mo base constructor is empty.
        """
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
5fae31584041719660552b0480a326fbc13274c6 | a13456ff06c5429e48c2ee08c456061eeba09c07 | /pg11.py | eb20365d811f30d9b3b5d77f4fab2b4fd6edc465 | [] | no_license | ramyasutraye/Python_programming-2 | e1dad9151882e7dd267893c6d93b9fb1415dad51 | fdfd51f4263345ffb33a93d5a6b420c8ff90ce12 | refs/heads/master | 2020-04-24T02:56:18.141652 | 2018-12-06T06:55:25 | 2018-12-06T06:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | b = int(input(" "))
p = int(input(" "))
c=b**p
print(c)
| [
"noreply@github.com"
] | ramyasutraye.noreply@github.com |
a7b128e2ceda284f59e9a1fab21f774ce6c065d8 | 668a956d20eabae835a90b2d688f2232707e53d5 | /day18/untitled1/website/migrations/0001_initial.py | 732c12c19a989b2e4d383f06d92ee3e2ab72bac9 | [] | no_license | cq146637/The_front_background | e476c85dfa50cb181f350a5c36e241256637b500 | fefb3db705fe311022568619eb9e006c83b2eaf2 | refs/heads/master | 2021-08-31T23:04:27.298570 | 2017-12-23T09:22:51 | 2017-12-23T09:22:51 | 114,733,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-04 07:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the UserInfo table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='UserInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=20)),
                # NOTE(review): a plain 50-char CharField suggests the
                # password may be stored unhashed — confirm it is hashed
                # before being saved.
                ('password', models.CharField(max_length=50)),
            ],
        ),
    ]
| [
"1016025625@qq.com"
] | 1016025625@qq.com |
b79e3b31ea6038a2722fc0070b0ee58ab17da473 | c7e63d5ee80a96024182846c7a4f01b10da36575 | /get_online_phys_data/cat_all_csv_files.py | 2b09ace094ea925e09792589a6f021f0e1071ab8 | [] | no_license | tommylees112/pi_weather_station | c0fa2730b9158b1f36a8dd1a77f139f4e5d6971e | ef880c93afdfb08f64c517fe30a49f8efad313f4 | refs/heads/master | 2022-10-11T20:16:28.902304 | 2022-09-28T13:47:04 | 2022-09-28T13:47:04 | 143,993,191 | 2 | 2 | null | 2022-09-28T13:47:05 | 2018-08-08T09:43:15 | Python | UTF-8 | Python | false | false | 81 | py | # cat_all_csv_files.py
import glob
# NOTE(review): the glob result is discarded — presumably this was meant to
# be printed or assigned for later use; confirm the intent.
glob.glob("oxford_weather_station/*/*.pkl")
| [
"thomas.lees112@gmail.com"
] | thomas.lees112@gmail.com |
464e20ba504d56b1908b9d7160f72647b6baa3a5 | dd9a9649c6c82e1decaf4d2ea56c198b18cdd395 | /api/views.py | 9e6090589a87c02eb45be05750e9b6ed880cefdb | [] | no_license | srrobin/rest-framework-project | e56d2523e8d23109b308bfe8b6ea4bd0511b9da9 | d3050216b0f2cd9c81ad4dc57aa468cc6e007b0a | refs/heads/master | 2020-11-24T15:26:26.462369 | 2019-12-15T16:54:47 | 2019-12-15T16:54:47 | 228,215,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | from rest_framework import generics,permissions
from quote.models import QuoteList
from quote.models import QuoteCategory
from .serializers import QuoteSerializer
from .serializers import QuoteCategorySerializer
class QuoteAPIView(generics.ListAPIView):
    """Read-only list of all quotes; requires an authenticated user."""
    permission_classes =(permissions.IsAuthenticated,)
    queryset = QuoteList.objects.all()
    serializer_class = QuoteSerializer
class QuoteCategoryAPIView(generics.ListAPIView):
    """Read-only list of all quote categories (no extra permissions set here)."""
    queryset = QuoteCategory.objects.all()
    serializer_class = QuoteCategorySerializer
class QuoteAPIDetailView( generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single quote addressed by primary key."""
    queryset = QuoteList.objects.all()
    serializer_class = QuoteSerializer
class QuoteAPINewView(generics.ListCreateAPIView):
queryset = QuoteList.objects.all().order_by('-id')[:1]
serializer_class = QuoteSerializer | [
"you@domain.com"
] | you@domain.com |
f923ced813c51ae62ed4fb7fb339dade9a332ae9 | 35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d | /Python_Study/第三模块学习/Shool_CRM/bin/start.py | 1bce8f2e71d6509dc9737bf36539adace09d8bde | [] | no_license | KongChan1988/51CTO-Treasure | 08b4ca412ad8a09d67c1ea79c7149f8573309ca4 | edb2e4bd11d39ac24cd240f3e815a88361867621 | refs/heads/master | 2021-07-04T15:57:56.164446 | 2019-07-24T15:28:36 | 2019-07-24T15:28:36 | 97,453,749 | 5 | 8 | null | 2019-10-30T22:05:12 | 2017-07-17T08:34:59 | Python | UTF-8 | Python | false | false | 250 | py | #-*- Coding:utf-8 -*-
# Author: D.Gray
import os,sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
sys.path.append(BASE_DIR)
from core import core
if __name__ == '__main__':
a = core.Conter()
a.run() | [
"wangwei_198811@163.com"
] | wangwei_198811@163.com |
434a48e868ae18d561bb5d11ae4e5b288a8152da | c67029032e5f35eaae448d601d8cb870fd2b2953 | /WeatherApp/weather/migrations/0001_initial.py | 7fc72879a9963b8494cd28f9e4794ab6f4640218 | [
"MIT"
] | permissive | VToropov1337/django_weather | c6be2a882ed3a096e3df394acc143fdfa8f798d3 | 936637578f52679d6b79becc77372055dad6008f | refs/heads/master | 2020-12-04T20:00:13.632506 | 2020-01-05T12:47:06 | 2020-01-05T12:47:06 | 231,887,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # Generated by Django 3.0 on 2020-01-05 10:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the City table with a single name field."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
    ]
| [
"vladimirtoropov87@gmail.com"
] | vladimirtoropov87@gmail.com |
dce5f9b2ef6fb2aa4b68de8e9b42a8e58f0c7336 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03786/s612653284.py | 3f76c3a440685a6568c77eac0ac77920bf2a28d3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | n = int(input())
a = sorted(list(map(int,input().split())))
cur = a[0]
count = 1
for i in range(1,n):
if a[i] <= cur*2:
count += 1
else:
count = 1
cur += a[i]
print(count) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e4c365971bff3e2af3483a510bf04d821556aff0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/309/106184/submittedfiles/jogoDaVelha_BIB.py | 46e2792cb92d6791a026e2704a7844089d936447 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,444 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from random import randint
# autenticação do simbolo para a jogada humano
def solicitaSimboloDoHumano():
# nome=input('Qual seu nome(ou apelido)? ')
simbH= (input("\nQual o simbolo que você deseja utilizar no jogo? "))
while simbH!="X" and simbH!="O" and simbH!="o" and simbH!="x" :
print ("\nOps! Simbolo inválido")
simbH= input("\nInforme um simbolo válido que deseja utilizar para a partida: X ou O : ")
if simbH=="X" or simbH=="x":
simbH="X"
else:
simbH="O"
return simbH
#sorteio
def sorteioPrimeiraJogada (simbM, simbH, tabuleiro, nome):
now= datetime.now()
a=now.second
#essa var serve para ajudar a definir de quem será aproxima jogada
pro=0
if a%2==0:
print("\nVencedor do sorteio para inicio do jogo: Computador")
prop=1
# chama a função mostraTabuleiro com a jogada do computador
tabuleiro=jogadaComputador(tabuleiro, simbM)
mostraTabuleiro(tabuleiro)
else:
print("\nVencedor do sorteio para inicio do jogo: Jogador")
prop=2
#chama a função mostraTabuleiro com a jogada do jogador
#tabuleiro=jogadaHumana(nome, simbH, tabuleiro)
return prop
#Função para printar o tabuleiro:
def mostraTabuleiro(tabuleiro):
print(' ')
print (tabuleiro[0][0] +' | '+ tabuleiro[0][1] + ' | '+ tabuleiro[0][2])
print (tabuleiro[1][0] +' | '+ tabuleiro[1][1] + ' | '+ tabuleiro[1][2])
print (tabuleiro[2][0] +' | '+ tabuleiro[2][1] + ' | '+ tabuleiro[2][2])
#Função da jogada do humano
def jogadaHumana(nome, simbH, tabuleiro):
casa=[]
casa=input("\n Qual a sua jogada, %s ? " %nome)
#tabuleiro[casa//10][casa%10]=simbH
i=int(casa[0])
j=int(casa[2])
while i>2 and j>2 or i<0 and j<0 :
print('\nOps! Jogada invalida... ')
casa=int(input("\n Qual a sua jogada, %s ?" %nome))
i=int(casa[0])
j=int(casa[2])
validarJogada(nome, simbH, tabuleiro, i, j)
return tabuleiro
#Função para validar uma jogada
def validarJogada(nome, simbH, tabuleiro, i, j):
if tabuleiro[i][j]!="X" and tabuleiro[i][j]!="O" :
tabuleiro[i][j]=simbH
else:
print ("\nOPS!!! Essa jogada não está disponível. Tente novamente!")
jogadaHumana(nome, simbH, tabuleiro)
#Função da Jogada do computador
def jogadaComputador(tabuleiro, simbM):
sortL=randint(0, 2)
sortC=randint(0, 2)
while tabuleiro[sortL][sortC] !=" " :
sortL=randint(0, 2)
sortC=randint(0, 2)
tabuleiro[sortL][sortC]=simbM
return tabuleiro
#Função que verifica o vencedor
def VerificaVencedor(tab, simbH, nome, simbM):
x=1
if tab[0][0]==tab[0][2] and tab[0][0]==tab[0][1] and tab[0][1]==tab[0][2]:
if tab[0][0]==simbH:
x=2
elif tab[0][0]==simbM:
x=4
elif tab[1][0]==tab[1][1] and tab[1][1]==tab[1][2] and tab[1][0]==tab[1][2]:
if tab[1][0]==simbH:
x=2
elif tab[1][0]==simbM:
x=4
elif tab[2][0]==tab[2][1] and tab[2][1]==tab[2][2] and tab[2][0]==tab[2][2]:
if tab[2][0]==simbH:
x=2
elif tab[2][0]==simbM:
x=4
elif tab[0][0]==tab[1][0] and tab[2][0]==tab[0][0] and tab[2][0]==tab[1][0]:
if tab[1][0]==simbH:
x=2
elif tab[1][0]==simbM:
x=4
elif tab[0][1]==tab[1][1] and tab[1][1]==tab[2][1] and tab[0][1]==tab[2][1]:
if tab[1][1]==simbH:
x=2
elif tab[1][1]==simbM:
x=4
elif tab[0][2]==tab[1][2] and tab[1][2]==tab[2][2] and tab[0][2]==tab[2][2]:
if tab[2][2]==simbH:
x=2
elif tab[2][2]==simbM:
x=4
elif tab[0][0]==tab[1][1] and tab[1][1]==tab[2][2] and tab[0][0]==tab[2][2]:
if tab[0][0]==simbH:
x=2
elif tab[0][0]==simbM:
x=4
elif tab[0][2]==tab[1][1] and tab[1][1]==tab[2][0] and tab[2][0]==tab[0][2]:
if tab[2][0]==simbH:
x=2
elif tab[2][0]==simbM:
x=4
elif tab[0][0]!=" " and tab[0][1]!=" " and tab[0][2]!=" " and tab[1][0]!=" " and tab[1][1]!=" " and tab[1][2]!=" " and tab[2][0]!=" " and tab[2][1]!=" " and tab[2][2]!=" ":
print ('Deu velha')
x=6
return x
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d9596a089de2fc79e1fc2156fb5fe244e800b3a3 | d06f9135fd0f42e59df4c91324cd22124a3486d7 | /Study_Groups/mapper.py | 8ac83c84dbee973eb3f0421b64d8367921a97844 | [] | no_license | jlyang1990/MapReduce | 7546599afaa9b8366e02a0724a6b77cc46149b30 | d01a4d7c4e12e0c77ff820fb2a20a9210c9ce3b0 | refs/heads/master | 2020-06-13T16:56:10.008429 | 2016-12-05T08:16:33 | 2016-12-05T08:16:36 | 75,523,296 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | #!/usr/bin/python
"""
In this exercise your task is to write a mapreduce program that for each forum thread (that is a question node with all it's answers and comments)
would give us a list of students that have posted there - either asked the question, answered a question or added a comment.
If a student posted to that thread several times, they should be added to that list several times as well, to indicate intensity of communication.
"""
"""
column names of forum_node.tsv are:
id, title, tagnames, author_id, body, node_type, parent_id, abs_parent_id, added_at, score, state_string,
last_edited_id, last_activity_by_id, last_activity_at, active_revision_id, extra, extra_ref_id, extra_count, marked
The ones that are the most relevant to the task are:
"id": id of the node
"title": title of the node. in case "node_type" is "answer" or "comment", this field will be empty
"tagnames": space separated list of tags
"author_id": id of the author
"body": content of the post
"node_type": type of the node, either "question", "answer" or "comment"
"parent_id": node under which the post is located, will be empty for "questions"
"abs_parent_id": top node where the post is located
"added_at": date added
"""
import sys
import csv
reader = csv.reader(sys.stdin, delimiter = '\t')
for line in reader:
if len(line) == 19 and line[0] != "id":
if line[5] == "question":
print "{0}\t{1}".format(line[0], line[3])
else:
print "{0}\t{1}".format(line[6], line[3])
| [
"jlyang@ucdavis.edu"
] | jlyang@ucdavis.edu |
1ddfcadb90f807f864cbfd5f09862558cbbcd387 | 8839bd1f2e35726b6c8066985690fa2fa86b09a6 | /5.pyAI-K210/5.项目应用/2.视频播放/video_play.py | 8f0972098bff92c05d8d2969bc4f8bdbdf482f2a | [
"MIT"
] | permissive | elektrik-elektronik-muhendisligi/MicroPython-Examples-1 | a9532b06aba470f7f26f841929f4fb145549f70b | f7b08e95ff73e3417af21918c9c6bcf2f83281c6 | refs/heads/master | 2021-05-25T22:58:36.207098 | 2020-04-01T09:50:53 | 2020-04-01T09:50:53 | 253,956,073 | 1 | 0 | null | 2020-04-08T01:39:46 | 2020-04-08T01:39:45 | null | UTF-8 | Python | false | false | 807 | py | '''
实验名称:视频播放器
版本:v1.0
日期:2019.12
翻译和注释:01Studio
说明:AVI视频播放。
'''
import video,time
from Maix import GPIO
from board import board_info
from fpioa_manager import fm
import lcd
lcd.init()
# 音频使能IO
AUDIO_PA_EN_PIN = 32
#注册音频使能IO
if AUDIO_PA_EN_PIN:
fm.register(AUDIO_PA_EN_PIN, fm.fpioa.GPIO1, force=True)
#注册音频控制IO
fm.register(34, fm.fpioa.I2S0_OUT_D1, force=True)
fm.register(35, fm.fpioa.I2S0_SCLK, force=True)
fm.register(33, fm.fpioa.I2S0_WS, force=True)
#播放avi文件
v = video.open("/sd/badapple.avi")
#打印视频文件信息
print(v)
#音量调节
v.volume(5)
while True:
if v.play() == 0: #播放完毕
print("play end")
break
v.__del__() #销毁对象,释放内存
| [
"237827161@qq.com"
] | 237827161@qq.com |
cddcb6fcda2ccfcf7c8853049983101c62c2c2c7 | 739e91039c05943352a3fc07e768641f74097482 | /Python_self/Binary_Tree.py | 9b0dcbbdfde156b128cf02239d2a618ab114eadf | [] | no_license | doublejy715/Problem-Solve | 651182079ded1a9da3478dd30a4c4507894de85e | 57d5a672a48103769c8cc022cb7132d988624600 | refs/heads/master | 2023-07-13T09:51:23.145427 | 2021-08-31T04:35:04 | 2021-08-31T04:35:04 | 234,250,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | '''
트리는 변수를 head를 가진다 root node이다.
기본적으로 이진트리는 링크드 리스트로 형성 할 수 있다.
기본적으로 노드 형성 클래스와 트리 클래스로 나누어서 생각한다.
1. class node
data, left, right
2. class tree
insert
search(종류 : DLR)
delete
'''
class Node():
def __init__(self,value):
self.value = value
self.left = None
self.right = None
class tree():
def __init__(self,head):
self.head = head
# 1. insert
def insert(self,data):
self.current_node = self.head # 시작할 위치를 지정해 놓는다.
while True: # 해당 조건이 만족할 동안만 작동한다.
# 값의 크기를 비교해서 넣어준다.
# 1.1 삽입하려는 값이 현재의 값보다 작은 경우.
if self.current_node.value > data:
if self.current_node.left != None:
self.current_node = self.current_node.left
else:
self.current_node.left = Node(data)
break
# 1.2 삽입하려는 값이 현재의 값보다 큰 경우
elif self.current_node.value < data:
if self.current_node.right != None:
self.current_node = self.current_node.right
else:
self.current_node.right = Node(data)
break
# 2. search
def search(self,data):
self.current_node = self.head
while self.current_node.data:
# 2.1 찾는 값이 일치하는 경우
if self.current_node == data:
return True
# 2.2 찾는 값이 현재 노드보다 더 큰 경우
elif self.current_node > data:
self.current_node = self.current_node.right
# 2.3 찾는 값이 현재 노드보다 더 작은 경우
else:
self.current_node = self.current_node.left
# 찾지 못하는 경우
return False
head = Node(2)
BST = tree(head)
BST.insert(1)
BST.insert(3)
BST.search(1) | [
"doublejy715@gmail.com"
] | doublejy715@gmail.com |
025ee506dce791e2417711b08ae51f16ccd15efc | 757aace69943122a21b1fac07ea43199e2ca1228 | /lib/python3.5/site-packages/launch/__init__.py | 39e23c9c10ac1f5300733afa7dbe86a03992527d | [] | no_license | iamsile/ros2-for-os | f9faa522b2f17f04e07de88a6053599fa9a987bc | a83f66a6a34ec9ec969b54194e3bdd32d5b9d092 | refs/heads/master | 2021-01-20T16:31:05.458482 | 2017-06-29T02:00:48 | 2017-06-29T02:00:48 | 95,727,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from launch.output_handler import CompositeOutputHandler
from launch.output_handler import ConsoleOutput
from launch.exit_handler import default_exit_handler
class LaunchDescriptor(object):
def __init__(self):
self.task_descriptors = []
def add_coroutine(self, coroutine, name=None, exit_handler=None):
if name is not None and name in [p.name for p in self.task_descriptors]:
raise RuntimeError("Task name '%s' already used" % name)
if exit_handler is None:
exit_handler = default_exit_handler
coroutine_descriptor = CoroutineDescriptor(
coroutine, name, exit_handler)
self.task_descriptors.append(coroutine_descriptor)
return coroutine_descriptor
def add_process(self, cmd, name=None, env=None, output_handlers=None, exit_handler=None):
if name is not None and name in [p.name for p in self.task_descriptors]:
raise RuntimeError("Task name '%s' already used" % name)
if output_handlers is None:
output_handlers = [ConsoleOutput()]
output_handlers = CompositeOutputHandler(output_handlers)
if exit_handler is None:
exit_handler = default_exit_handler
process_descriptor = ProcessDescriptor(
cmd, name, output_handlers, exit_handler, env=env)
self.task_descriptors.append(process_descriptor)
return process_descriptor
class TaskDescriptor(object):
def __init__(self):
self.task_state = None
class CoroutineDescriptor(TaskDescriptor):
def __init__(self, coroutine, name, exit_handler):
super(CoroutineDescriptor, self).__init__()
self.coroutine = coroutine
self.name = name
self.exit_handler = exit_handler
class ProcessDescriptor(TaskDescriptor):
def __init__(self, cmd, name, output_handler, exit_handler, env=None):
super(ProcessDescriptor, self).__init__()
self.cmd = cmd
self.name = name
self.output_handler = output_handler
self.exit_handler = exit_handler
self.env = env
self.transport = None
self.protocol = None
def send_signal(self, signal):
if self.transport:
self.transport.send_signal(signal)
def terminate(self):
if self.transport:
self.transport.terminate()
| [
"longxian.tw@alibaba-inc.com"
] | longxian.tw@alibaba-inc.com |
19b1eac27cf2de891f0469d21918d872855415a4 | 5c0c0176db0ccf2c24b6b5ed459a8dc144518b13 | /examples/nas/naive-tf/train.py | f2b2062a8954ca01b8b6e9ef11b2dfe99ca3e815 | [
"MIT"
] | permissive | petuum/nni | ac4f4a1c4d6df71684eeffa127b7c4858fd29e97 | 8134be6269902939232482d63649c06f9864be6d | refs/heads/master | 2023-02-18T11:21:41.078889 | 2021-01-20T03:21:50 | 2021-01-20T03:21:50 | 302,736,456 | 4 | 3 | MIT | 2020-11-20T20:21:15 | 2020-10-09T19:34:11 | Python | UTF-8 | Python | false | false | 2,663 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (AveragePooling2D, BatchNormalization, Conv2D, Dense, MaxPool2D)
from tensorflow.keras.losses import Reduction, SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import SGD
from nni.nas.tensorflow.mutables import LayerChoice, InputChoice
from nni.algorithms.nas.tensorflow.enas import EnasTrainer
class Net(Model):
def __init__(self):
super().__init__()
self.conv1 = LayerChoice([
Conv2D(6, 3, padding='same', activation='relu'),
Conv2D(6, 5, padding='same', activation='relu'),
])
self.pool = MaxPool2D(2)
self.conv2 = LayerChoice([
Conv2D(16, 3, padding='same', activation='relu'),
Conv2D(16, 5, padding='same', activation='relu'),
])
self.conv3 = Conv2D(16, 1)
self.skipconnect = InputChoice(n_candidates=1)
self.bn = BatchNormalization()
self.gap = AveragePooling2D(2)
self.fc1 = Dense(120, activation='relu')
self.fc2 = Dense(84, activation='relu')
self.fc3 = Dense(10)
def call(self, x):
bs = x.shape[0]
t = self.conv1(x)
x = self.pool(t)
x0 = self.conv2(x)
x1 = self.conv3(x0)
x0 = self.skipconnect([x0])
if x0 is not None:
x1 += x0
x = self.pool(self.bn(x1))
x = self.gap(x)
x = tf.reshape(x, [bs, -1])
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
def accuracy(truth, logits):
truth = tf.reshape(truth, (-1, ))
predicted = tf.cast(tf.math.argmax(logits, axis=1), truth.dtype)
equal = tf.cast(predicted == truth, tf.int32)
return tf.math.reduce_sum(equal).numpy() / equal.shape[0]
def accuracy_metrics(truth, logits):
acc = accuracy(truth, logits)
return {'accuracy': acc}
if __name__ == '__main__':
cifar10 = tf.keras.datasets.cifar10
(x_train, y_train), (x_valid, y_valid) = cifar10.load_data()
x_train, x_valid = x_train / 255.0, x_valid / 255.0
train_set = (x_train, y_train)
valid_set = (x_valid, y_valid)
net = Net()
trainer = EnasTrainer(
net,
loss=SparseCategoricalCrossentropy(from_logits=True, reduction=Reduction.NONE),
metrics=accuracy_metrics,
reward_function=accuracy,
optimizer=SGD(learning_rate=0.001, momentum=0.9),
batch_size=64,
num_epochs=2,
dataset_train=train_set,
dataset_valid=valid_set
)
trainer.train()
| [
"noreply@github.com"
] | petuum.noreply@github.com |
aeec0d323c820ebcbab293cfab36ab3d59368f88 | 3537265c1b60f0c0eb06d165a0b5779438fc698c | /py/vision/test1/visionlib.py | d1c1136cad4cce4ea52424fc24b104b74f284547 | [
"MIT"
] | permissive | iqihao/mabo.io | f864ba3e158d8c6ee113dd8c0ae8708cae86e9d1 | 7f646db9d5ee3cd0b137866bf8eaf295890f134c | refs/heads/master | 2021-05-30T12:59:56.723936 | 2016-02-10T23:45:36 | 2016-02-10T23:45:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py |
import itertools
import cv2
import numpy as np
def findKeyPoints(img, template, distance=200):
""" find key points in image """
# SIFT
FEATURE_DETECTOR = "SIFT" #"SURF" # "SIFT"
detector = cv2.FeatureDetector_create(FEATURE_DETECTOR)
descriptor = cv2.DescriptorExtractor_create(FEATURE_DETECTOR)
skp = detector.detect(img)
skp, sd = descriptor.compute(img, skp)
tkp = detector.detect(template)
tkp, td = descriptor.compute(template, tkp)
flann_params = dict(algorithm=1, trees=4)
flann = cv2.flann_Index(sd, flann_params)
idx, dist = flann.knnSearch(td, 1, params={})
del flann
dist = dist[:,0]/2500.0
dist = dist.reshape(-1,).tolist()
idx = idx.reshape(-1).tolist()
indices = range(len(dist))
indices.sort(key=lambda i: dist[i])
dist = [dist[i] for i in indices]
idx = [idx[i] for i in indices]
skp_final = []
for i, dis in itertools.izip(idx, dist):
if dis < distance:
skp_final.append(skp[i])
flann = cv2.flann_Index(td, flann_params)
idx, dist = flann.knnSearch(sd, 1, params={})
del flann
dist = dist[:,0]/2500.0
dist = dist.reshape(-1,).tolist()
idx = idx.reshape(-1).tolist()
indices = range(len(dist))
indices.sort(key=lambda i: dist[i])
dist = [dist[i] for i in indices]
idx = [idx[i] for i in indices]
tkp_final = []
for i, dis in itertools.izip(idx, dist):
if dis < distance:
tkp_final.append(tkp[i])
return skp_final, tkp_final
def drawKeyPoints(img, template, skp, tkp, num=-1):
""" """
h1, w1 = img.shape[:2]
h2, w2 = template.shape[:2]
nWidth = w1+w2
nHeight = max(h1, h2)
hdif = (h1-h2)/2
newimg = np.zeros((nHeight, nWidth, 3), np.uint8)
newimg[hdif:hdif+h2, :w2] = template
newimg[:h1, w2:w1+w2] = img
maxlen = min(len(skp), len(tkp))
if num < 0 or num > maxlen:
num = maxlen
for i in range(num):
pt_a = (int(tkp[i].pt[0]), int(tkp[i].pt[1]+hdif))
pt_b = (int(skp[i].pt[0]+w2), int(skp[i].pt[1]))
cv2.line(newimg, pt_a, pt_b, (255, 0, 0))
return newimg | [
"aidear@163.com"
] | aidear@163.com |
f1583880f8ed6f14fca3054e147fec9973b07d2a | 096dc9b83f9f8764cbd92f628f215d34b4bab4d5 | /src/11/simple_communication_between_interpreters/echoclient.py | 96fc37158b72a9ab6493fd50f1d1c6cb059b4ef9 | [] | no_license | pyarnold/python-cookbook | 18ceecec5a728d88ea9065bb1b2323e59df686a1 | ef8a32741d4571b75d995637a8b2edc3b85a2859 | refs/heads/master | 2021-01-21T09:29:34.998385 | 2014-03-19T14:57:17 | 2014-03-19T14:57:17 | 17,908,202 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from multiprocessing.connection import Client
c = Client(('localhost', 25000), authkey=b'peekaboo')
c.send('hello')
print('Got:', c.recv())
c.send(42)
print('Got:', c.recv())
c.send([1, 2, 3, 4, 5])
print('Got:', c.recv())
| [
"dave@dabeaz.com"
] | dave@dabeaz.com |
c47d965ba5caaa518434b6d5470b3ab4d1914ce2 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_231/ch22_2020_03_31_22_11_05_701728.py | e06b780c09bc1461dcd97486bc9dfc02678bef27 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | c= (input('quantos cigarros vc fuma por dia?'))
a= (input('ha quantos anos?'))
t= (10*c*a)/1440
print(t)
| [
"you@example.com"
] | you@example.com |
fc92399aa42aec6f5c79201431a31e094a729032 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03377/s172991411.py | d2919c59254b5b5c249175befd8776cdf3a52de3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | def main():
a,b,x = map(int, input().split())
if a<=x and x<=a+b:
print('YES')
else:
print('NO')
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2bf27f22ff1a3a923f49193f02937201c8afebdf | 3f2d1c68d07dd6677bc19c559b1960ca5fef6346 | /tensorbord/train.py | 6a734e1fb8529e6c98a51c56d939fe1312206302 | [] | no_license | 213584adghj/ml | 6ffcf732377dabda129990e3a89468e18dd2700c | f73080e13c4a1c6babe0229bdb939eb3a7f988b6 | refs/heads/master | 2021-03-13T23:22:41.981534 | 2020-03-12T01:59:21 | 2020-03-12T01:59:21 | 246,720,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,684 | py | # coding: utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
class train(object):
def __init__(self):
with tf.name_scope('input'):
# 定义两个placeholder
self.x = tf.placeholder(tf.float32, [None, 784], name='x-input')
self.y = tf.placeholder(tf.float32, [None, 10], name='y-input')
self.prediction, self.loss, self.train_step, self.accuracy = self.get_model()
self.train_model()
# 命名空间
pass
def variable_summaries(self, var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean) # 平均值
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev) # 标准值
tf.summary.scalar('max', tf.reduce_max(var)) # 最大值
tf.summary.scalar('min', tf.reduce_min(var)) # 最小值
tf.summary.histogram('histogram', var) # 直方图
def get_model(self):
with tf.name_scope('layer'):
# 创建一个简单的神经网络(无隐藏层)
with tf.name_scope('wights'):
W = tf.Variable(tf.zeros([784, 10]), name='W')
self.variable_summaries(W)
with tf.name_scope('biases'):
b = tf.Variable(tf.zeros([10]), name='b')
self.variable_summaries(b)
with tf.name_scope('wx_plus_b'):
wx_plus_b = tf.matmul(self.x, W) + b
with tf.name_scope('softmax'):
prediction = tf.nn.softmax(wx_plus_b)
# 二次代价函数
# loss = tf.reduce_mean(tf.square(y-prediction))
# 交叉熵
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=prediction))
tf.summary.scalar('loss', loss)
with tf.name_scope('train'):
# 使用梯度下降法
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
# 将结果放在一个bool型列表中
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(prediction, 1))
with tf.name_scope('accuracy'):
# 求准确率
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
return prediction, loss, train_step, accuracy # 预测,损失函数,训练过程,准确率
def train_model(self):
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
batch_size = 100
# 计算一共有多少个批次
n_batch = mnist.train.num_examples // batch_size
merged = tf.summary.merge_all()
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
writer = tf.summary.FileWriter('logs', sess.graph)
for epoch in range(51):
for batch in range(n_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
summary, _ = sess.run([merged, self.train_step], feed_dict={self.x: batch_xs, self.y: batch_ys})
writer.add_summary(summary, epoch)
acc = sess.run(self.accuracy, feed_dict={self.x: mnist.test.images, self.y: mnist.test.labels})
print("Iter" + str(epoch) + ",Testing Accuracy" + str(acc))
pass
a = train()
| [
"you@example.com"
] | you@example.com |
51a886388078a4d3e2a0dbdcf134e56b1993c5af | 26cd3fc39f99b74c2741dcbac0d2fed0098d2deb | /thorpy/elements/slidersetter.py | 22b4d5a12c423ed038b9097b61e2fc5154df2817 | [
"MIT"
] | permissive | YannThorimbert/Thorpy-1.4 | b35c4293ba389e13fb7a2691543f9de7030d3f20 | 8dfc5f2fd047ae39c2c8aac1e23326be6152663b | refs/heads/master | 2021-04-09T17:12:34.356309 | 2015-05-11T20:53:18 | 2015-05-11T20:53:18 | 35,449,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,006 | py | from thorpy.elements.slider import SliderX
from thorpy.elements.element import Element
from thorpy.miscgui import functions, style, painterstyle
class SliderXSetter(SliderX):
"""Like a slider, but has a get_value method"""
def __init__(self,
length,
limvals=None,
text="",
elements=None,
normal_params=None,
namestyle=None,
valuestyle=None,
typ=float,
initial_value=None):
namestyle = style.STYLE_SLIDER_NAME if namestyle is None else namestyle
valuestyle=style.STYLE_SLIDER_VALUE if valuestyle is None else valuestyle
SliderX.__init__(self, length, limvals, "", elements, normal_params,
initial_value)
self._value_type = typ
self._round_decimals = 2
self._name_element = self._get_name_element(text, namestyle) # herite de setter
self._value_element = self._get_value_element(valuestyle)
self.add_elements([self._name_element, self._value_element])
def finish(self):
SliderX.finish(self)
self._refresh_pos()
self._drag_element.set_setter()
value = str(self.get_value())
self._value_element.set_text(value)
self.set_prison()
def show_value(self, show_value):
self._value_element.visible = show_value
def _get_name_element(self, name, namestyle):
painter = functions.obtain_valid_painter(
painterstyle.CHECKER_NAME_PAINTER,
size=style.SIZE)
el = Element(name)
el.set_painter(painter)
if namestyle:
el.set_style(namestyle)
el.finish()
return el
def _get_value_element(self, valuestyle):
painter = functions.obtain_valid_painter(
painterstyle.CHECKER_VALUE_PAINTER,
size=style.CHECK_SIZE)
el = Element(str(self.get_value()))
el.set_painter(painter)
if valuestyle:
el.set_style(valuestyle)
el.finish()
return el
def _refresh_pos(self):
l = self.get_fus_topleft()[0]
(x, y) = self.get_fus_center()
l -= self._name_element.get_fus_size()[0] + style.MARGINS[0]
self._name_element.set_center((None, y))
self._name_element.set_topleft((l, None))
w = self.get_fus_rect().right + style.MARGINS[0]
self._value_element.set_center((None, y))
self._value_element.set_topleft((w, None))
def refresh_value(self):
self._value_element.unblit()
self._value_element.update()
value = str(self.get_value())
self._value_element.set_text(value)
self._value_element.blit()
self._value_element.update()
def get_value(self):
value = SliderX.get_value(self)
return self._value_type(value)
def set_font_color(self, color, state=None, center_title=True):
"""set font color for a given state"""
SliderX.set_font_color(self, color, state, center_title)
self._name_element.set_font_color(color, state, center_title)
def set_font_size(self, size, state=None, center_title=True):
"""set font size for a given state"""
SliderX.set_font_size(self, size, state, center_title)
self._name_element.set_font_size(size, state, center_title)
def set_font_effects(self, biu, state=None, center=True, preserve=False):
"""biu = tuple : (bold, italic, underline)"""
SliderX.set_font_effects(self, bio, state, center, preserve)
self._name_element.set_font_effects(biu, state, center, preserve)
def pix_to_val(self, pix, x0):
value = SliderX.pix_to_val(self, pix, x0)
if self._value_type is float:
return round(value, self._round_decimals)
elif self._value_type is int:
return int(round(value))
def get_help_rect(self):
return self._name_element.get_help_rect()
| [
"yann.thorimbert@gmail.com"
] | yann.thorimbert@gmail.com |
170ab9e04c87704324e5667f1c73ce9e974a7587 | 4015291afebfd346da3fee4b1d5a775882b5b461 | /services/director-v2/tests/unit/test_models_dynamic_services.py | 03c9084f24b2632efd11873930d77a10d59eb40f | [
"MIT"
] | permissive | pcrespov/osparc-simcore | 3a8a6b5252038542f515c7e90d983ac6f1fb4de7 | eb5e00bc2cf4acfe81f5dc422a5e50a4646c9596 | refs/heads/master | 2023-08-06T04:33:38.594066 | 2023-07-12T09:47:00 | 2023-07-12T09:47:00 | 130,357,545 | 0 | 1 | MIT | 2023-04-18T08:04:27 | 2018-04-20T12:10:41 | Python | UTF-8 | Python | false | false | 6,964 | py | # pylint: disable=redefined-outer-name
import string
from collections import namedtuple
import pytest
from simcore_service_director_v2.models.schemas.dynamic_services import (
RunningDynamicServiceDetails,
SchedulerData,
ServiceBootType,
ServiceState,
)
from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import (
DockerContainerInspect,
)
from simcore_service_director_v2.modules.dynamic_sidecar.docker_states import (
CONTAINER_STATUSES_UNEXPECTED,
extract_containers_minimum_statuses,
)
# the following is the predefined expected ordering, change below test only if
# this order is not adequate anymore
_EXPECTED_ORDER = [
ServiceState.FAILED,
ServiceState.PENDING,
ServiceState.PULLING,
ServiceState.STARTING,
ServiceState.RUNNING,
ServiceState.STOPPING,
ServiceState.COMPLETE,
]
CNT_STS_RESTARTING = "restarting"
CNT_STS_DEAD = "dead"
CNT_STS_PAUSED = "paused"
CNT_STS_CREATED = "created"
CNT_STS_RUNNING = "running"
CNT_STS_REMOVING = "removing"
CNT_STS_EXITED = "exited"
ALL_CONTAINER_STATUSES: set[str] = {
CNT_STS_RESTARTING,
CNT_STS_DEAD,
CNT_STS_PAUSED,
CNT_STS_CREATED,
CNT_STS_RUNNING,
CNT_STS_REMOVING,
CNT_STS_EXITED,
}
RANDOM_STRING_DATASET = string.ascii_letters + string.digits
ExpectedStatus = namedtuple("ExpectedStatus", "containers_statuses, expected_state")
@pytest.fixture
def service_message() -> str:
return "starting..."
@pytest.fixture
def service_state() -> ServiceState:
return ServiceState.RUNNING
@pytest.fixture
def mock_containers_statuses() -> dict[str, dict[str, str]]:
return {
"container_id_1": {"Status": "created"},
"container_id_2": {"Status": "dead", "Error": "something"},
"container_id_3": {"Status": "running"},
}
# UTILS
def _make_status_dict(status: str) -> DockerContainerInspect:
assert status in ALL_CONTAINER_STATUSES
status_dict = {"Status": status}
if status in CONTAINER_STATUSES_UNEXPECTED:
status_dict["Error"] = "failed state here"
return DockerContainerInspect.from_container(
{"State": status_dict, "Name": "", "Id": ""}
)
def get_containers_inspect(*args: str) -> list[DockerContainerInspect]:
return [_make_status_dict(x) for x in args]
def _all_states() -> set[ServiceState]:
return set(ServiceState)
SAMPLE_EXPECTED_STATUSES: list[ExpectedStatus] = [
ExpectedStatus(
containers_statuses=get_containers_inspect(
CNT_STS_RESTARTING, CNT_STS_EXITED, CNT_STS_RUNNING
),
expected_state=ServiceState.FAILED,
),
ExpectedStatus(
containers_statuses=get_containers_inspect(CNT_STS_CREATED, CNT_STS_RUNNING),
expected_state=ServiceState.STARTING,
),
ExpectedStatus(
containers_statuses=get_containers_inspect(CNT_STS_CREATED),
expected_state=ServiceState.STARTING,
),
ExpectedStatus(
containers_statuses=get_containers_inspect(CNT_STS_RUNNING),
expected_state=ServiceState.RUNNING,
),
ExpectedStatus(
containers_statuses=get_containers_inspect(CNT_STS_REMOVING, CNT_STS_EXITED),
expected_state=ServiceState.FAILED,
),
]
def test_running_service_details_make_status(
scheduler_data: SchedulerData, service_message: str, service_state: ServiceState
):
running_service_details = RunningDynamicServiceDetails.from_scheduler_data(
node_uuid=scheduler_data.node_uuid,
scheduler_data=scheduler_data,
service_state=service_state,
service_message=service_message,
)
print(running_service_details)
assert running_service_details
running_service_details_dict = running_service_details.dict(
exclude_unset=True, by_alias=True
)
expected_running_service_details = {
"boot_type": ServiceBootType.V2,
"project_id": scheduler_data.project_id,
"service_state": service_state,
"service_message": service_message,
"service_uuid": scheduler_data.node_uuid,
"service_key": scheduler_data.key,
"service_version": scheduler_data.version,
"service_host": scheduler_data.service_name,
"user_id": scheduler_data.user_id,
"service_port": scheduler_data.service_port,
}
assert running_service_details_dict == expected_running_service_details
def test_all_states_are_mapped():
service_state_defined: set[ServiceState] = _all_states()
comparison_mapped: set[ServiceState] = set(ServiceState.comparison_order().keys())
assert (
service_state_defined == comparison_mapped
), "entries from _COMPARISON_ORDER do not match all states in ServiceState"
def test_equality():
for service_state in _all_states():
assert service_state == ServiceState(service_state.value)
def test_expected_order():
for k, service_state in enumerate(_EXPECTED_ORDER):
prior_states = _EXPECTED_ORDER[:k]
for prior_service_state in prior_states:
assert prior_service_state < service_state
assert prior_service_state != service_state
assert service_state > prior_service_state
def test_min_service_state_is_lowerst_in_expected_order():
for i in range(len(_EXPECTED_ORDER)):
items_after_index = _EXPECTED_ORDER[i:]
assert min(items_after_index) == items_after_index[0]
@pytest.mark.parametrize(
"containers_statuses, expected_state",
[(x.containers_statuses, x.expected_state) for x in SAMPLE_EXPECTED_STATUSES],
ids=[x.expected_state.name for x in SAMPLE_EXPECTED_STATUSES],
)
def test_extract_containers_minimim_statuses(
containers_statuses: list[DockerContainerInspect], expected_state: ServiceState
):
service_state, _ = extract_containers_minimum_statuses(containers_statuses)
assert service_state == expected_state
def test_not_implemented_comparison() -> None:
with pytest.raises(TypeError):
# pylint: disable=pointless-statement
ServiceState.FAILED > {} # type: ignore
def test_regression_legacy_service_compatibility() -> None:
    """A legacy-style API payload still parses and yields a usable legacy service URL."""
    legacy_payload = {
        "published_port": None,
        "entry_point": "",
        "service_uuid": "e5aa2f7a-eac4-4522-bd4f-270b5d8d9fff",
        "service_key": "simcore/services/dynamic/mocked",
        "service_version": "1.6.10",
        "service_host": "mocked_e5aa2f7a-eac4-4522-bd4f-270b5d8d9fff",
        "service_port": 8888,
        "service_basepath": "/x/e5aa2f7a-eac4-4522-bd4f-270b5d8d9fff",
        "service_state": "running",
        "service_message": "",
        "user_id": "1",
        "project_id": "b1ec5c8e-f5bb-11eb-b1d5-02420a000006",
    }
    parsed = RunningDynamicServiceDetails.parse_obj(legacy_payload)
    assert parsed
    expected_url = f"http://{parsed.host}:{parsed.internal_port}{parsed.basepath}"
    assert expected_url == parsed.legacy_service_url
| [
"noreply@github.com"
] | pcrespov.noreply@github.com |
550e0c516d9d0c37c13bcb74ddf65fb1572a9598 | 6a2b0db7d6c4ecef8434f3b35fcaef71eeb0d896 | /VENV/py3_venv/lib/python3.6/site-packages/ansible/modules/cloud/docker/docker_swarm_service.py | 722548acda9f5cbfdb8849fb33f6ca025c0a77e9 | [] | no_license | pseudonode/nornircourse | 9bf890ecfadd1a08691f113e0cd2acadd4b9bffa | 1ad0372f9673de784233937cc15779bc2391e267 | refs/heads/master | 2022-11-09T20:18:22.714703 | 2019-10-04T08:06:42 | 2019-10-04T08:06:42 | 211,856,983 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 107,266 | py | #!/usr/bin/python
#
# (c) 2017, Dario Zanzico (git@dariozanzico.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: marks this module as "preview" maturity and
# community-supported; parsed by Ansible's documentation/QA tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: docker_swarm_service
author:
- "Dario Zanzico (@dariko)"
- "Jason Witkowski (@jwitko)"
- "Hannes Ljungberg (@hannseman)"
short_description: docker swarm service
description:
- Manages docker services via a swarm manager node.
version_added: "2.7"
options:
args:
description:
- List arguments to be passed to the container.
- Corresponds to the C(ARG) parameter of C(docker service create).
type: list
command:
description:
- Command to execute when the container starts.
- A command may be either a string or a list or a list of strings.
- Corresponds to the C(COMMAND) parameter of C(docker service create).
type: raw
version_added: 2.8
configs:
description:
- List of dictionaries describing the service configs.
- Corresponds to the C(--config) option of C(docker service create).
- Requires API version >= 1.30.
type: list
suboptions:
config_id:
description:
- Config's ID.
type: str
required: yes
config_name:
description:
- Config's name as defined at its creation.
type: str
required: yes
filename:
description:
- Name of the file containing the config. Defaults to the I(config_name) if not specified.
type: str
required: yes
uid:
description:
- UID of the config file's owner.
type: str
gid:
description:
- GID of the config file's group.
type: str
mode:
description:
- File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
type: int
constraints:
description:
- List of the service constraints.
- Corresponds to the C(--constraint) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(placement.constraints) instead.
type: list
container_labels:
description:
- Dictionary of key value pairs.
- Corresponds to the C(--container-label) option of C(docker service create).
type: dict
dns:
description:
- List of custom DNS servers.
- Corresponds to the C(--dns) option of C(docker service create).
- Requires API version >= 1.25.
type: list
dns_search:
description:
- List of custom DNS search domains.
- Corresponds to the C(--dns-search) option of C(docker service create).
- Requires API version >= 1.25.
type: list
dns_options:
description:
- List of custom DNS options.
- Corresponds to the C(--dns-option) option of C(docker service create).
- Requires API version >= 1.25.
type: list
endpoint_mode:
description:
- Service endpoint mode.
- Corresponds to the C(--endpoint-mode) option of C(docker service create).
- Requires API version >= 1.25.
type: str
choices:
- vip
- dnsrr
env:
description:
- List or dictionary of the service environment variables.
- If passed a list each items need to be in the format of C(KEY=VALUE).
- If passed a dictionary values which might be parsed as numbers,
booleans or other types by the YAML parser must be quoted (e.g. C("true"))
in order to avoid data loss.
- Corresponds to the C(--env) option of C(docker service create).
type: raw
env_files:
description:
- List of paths to files, present on the target, containing environment variables C(FOO=BAR).
- The order of the list is significant in determining the value assigned to a
variable that shows up more than once.
- If variable also present in I(env), then I(env) value will override.
type: list
version_added: "2.8"
force_update:
description:
- Force update even if no changes require it.
- Corresponds to the C(--force) option of C(docker service update).
- Requires API version >= 1.25.
type: bool
default: no
groups:
description:
- List of additional group names and/or IDs that the container process will run as.
- Corresponds to the C(--group) option of C(docker service update).
- Requires API version >= 1.25.
type: list
version_added: "2.8"
healthcheck:
description:
- Configure a check that is run to determine whether or not containers for this service are "healthy".
See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
for details on how healthchecks work.
- "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Requires API version >= 1.25.
type: dict
suboptions:
test:
description:
- Command to run to check health.
- Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
type: raw
interval:
description:
- Time between running the check.
type: str
timeout:
description:
- Maximum time to allow one check to run.
type: str
retries:
description:
          - Consecutive failures needed to report unhealthy. It accepts an integer value.
type: int
start_period:
description:
- Start period for the container to initialize before starting health-retries countdown.
type: str
version_added: "2.8"
hostname:
description:
- Container hostname.
- Corresponds to the C(--hostname) option of C(docker service create).
- Requires API version >= 1.25.
type: str
hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the dictionary.
Each host name will be added to the container's /etc/hosts file.
- Corresponds to the C(--host) option of C(docker service create).
- Requires API version >= 1.25.
type: dict
version_added: "2.8"
image:
description:
- Service image path and tag.
- Corresponds to the C(IMAGE) parameter of C(docker service create).
type: str
required: yes
labels:
description:
- Dictionary of key value pairs.
- Corresponds to the C(--label) option of C(docker service create).
type: dict
limits:
description:
- Configures service resource limits.
suboptions:
cpus:
description:
- Service CPU limit. C(0) equals no limit.
- Corresponds to the C(--limit-cpu) option of C(docker service create).
type: float
memory:
description:
          - "Service memory limit (format: C(<number>[<unit>])). Number is a positive integer.
            Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
            C(T) (tebibyte), or C(P) (pebibyte)."
          - C(0) equals no limit.
          - Omitting the unit defaults to bytes.
          - Corresponds to the C(--limit-memory) option of C(docker service create).
type: str
type: dict
version_added: "2.8"
limit_cpu:
description:
- Service CPU limit. C(0) equals no limit.
- Corresponds to the C(--limit-cpu) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(limits.cpus) instead.
type: float
limit_memory:
description:
- "Service memory limit (format: C(<number>[<unit>])). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- C(0) equals no limit.
- Omitting the unit defaults to bytes.
- Corresponds to the C(--limit-memory) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(limits.memory) instead.
type: str
logging:
description:
- "Logging configuration for the service."
suboptions:
driver:
description:
- Configure the logging driver for a service.
- Corresponds to the C(--log-driver) option of C(docker service create).
type: str
options:
description:
- Options for service logging driver.
- Corresponds to the C(--log-opt) option of C(docker service create).
type: dict
type: dict
version_added: "2.8"
log_driver:
description:
- Configure the logging driver for a service.
- Corresponds to the C(--log-driver) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(logging.driver) instead.
type: str
log_driver_options:
description:
- Options for service logging driver.
- Corresponds to the C(--log-opt) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(logging.options) instead.
type: dict
mode:
description:
- Service replication mode.
- Service will be removed and recreated when changed.
- Corresponds to the C(--mode) option of C(docker service create).
type: str
default: replicated
choices:
- replicated
- global
mounts:
description:
- List of dictionaries describing the service mounts.
- Corresponds to the C(--mount) option of C(docker service create).
type: list
suboptions:
source:
description:
- Mount source (e.g. a volume name or a host path).
type: str
required: yes
target:
description:
- Container path.
type: str
required: yes
type:
description:
- The mount type.
type: str
default: bind
choices:
- bind
- volume
- tmpfs
readonly:
description:
- Whether the mount should be read-only.
type: bool
labels:
description:
- Volume labels to apply.
type: dict
version_added: "2.8"
propagation:
description:
- The propagation mode to use.
- Can only be used when I(mode) is C(bind).
type: str
choices:
- shared
- slave
- private
- rshared
- rslave
- rprivate
version_added: "2.8"
no_copy:
description:
- Disable copying of data from a container when a volume is created.
- Can only be used when I(mode) is C(volume).
type: bool
version_added: "2.8"
driver_config:
description:
- Volume driver configuration.
- Can only be used when I(mode) is C(volume).
suboptions:
name:
description:
- Name of the volume-driver plugin to use for the volume.
type: str
options:
description:
- Options as key-value pairs to pass to the driver for this volume.
type: dict
type: dict
version_added: "2.8"
tmpfs_size:
description:
- "Size of the tmpfs mount (format: C(<number>[<unit>])). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Can only be used when I(mode) is C(tmpfs).
type: str
version_added: "2.8"
tmpfs_mode:
description:
- File mode of the tmpfs in octal.
- Can only be used when I(mode) is C(tmpfs).
type: int
version_added: "2.8"
name:
description:
- Service name.
- Corresponds to the C(--name) option of C(docker service create).
type: str
required: yes
networks:
description:
- List of the service networks names.
- Prior to API version 1.29, updating and removing networks is not supported.
If changes are made the service will then be removed and recreated.
- Corresponds to the C(--network) option of C(docker service create).
type: list
placement:
description:
- Configures service placement preferences and constraints.
suboptions:
constraints:
description:
- List of the service constraints.
- Corresponds to the C(--constraint) option of C(docker service create).
type: list
preferences:
description:
- List of the placement preferences as key value pairs.
- Corresponds to the C(--placement-pref) option of C(docker service create).
- Requires API version >= 1.27.
type: list
type: dict
version_added: "2.8"
publish:
description:
- List of dictionaries describing the service published ports.
- Corresponds to the C(--publish) option of C(docker service create).
- Requires API version >= 1.25.
type: list
suboptions:
published_port:
description:
- The port to make externally available.
type: int
required: yes
target_port:
description:
- The port inside the container to expose.
type: int
required: yes
protocol:
description:
- What protocol to use.
type: str
default: tcp
choices:
- tcp
- udp
mode:
description:
- What publish mode to use.
- Requires API version >= 1.32.
type: str
choices:
- ingress
- host
read_only:
description:
- Mount the containers root filesystem as read only.
- Corresponds to the C(--read-only) option of C(docker service create).
type: bool
version_added: "2.8"
replicas:
description:
- Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
- If set to C(-1), and service is not present, service replicas will be set to C(1).
- If set to C(-1), and service is present, service replicas will be unchanged.
- Corresponds to the C(--replicas) option of C(docker service create).
type: int
default: -1
reservations:
description:
- Configures service resource reservations.
suboptions:
cpus:
description:
- Service CPU reservation. C(0) equals no reservation.
- Corresponds to the C(--reserve-cpu) option of C(docker service create).
type: float
memory:
description:
- "Service memory reservation (format: C(<number>[<unit>])). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- C(0) equals no reservation.
- Omitting the unit defaults to bytes.
- Corresponds to the C(--reserve-memory) option of C(docker service create).
type: str
type: dict
version_added: "2.8"
reserve_cpu:
description:
- Service CPU reservation. C(0) equals no reservation.
- Corresponds to the C(--reserve-cpu) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(reservations.cpus) instead.
type: float
reserve_memory:
description:
- "Service memory reservation (format: C(<number>[<unit>])). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- C(0) equals no reservation.
- Omitting the unit defaults to bytes.
- Corresponds to the C(--reserve-memory) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(reservations.memory) instead.
type: str
resolve_image:
description:
- If the current image digest should be resolved from registry and updated if changed.
- Requires API version >= 1.30.
type: bool
default: no
version_added: 2.8
restart_config:
description:
- Configures if and how to restart containers when they exit.
suboptions:
condition:
description:
- Restart condition of the service.
- Corresponds to the C(--restart-condition) option of C(docker service create).
type: str
choices:
- none
- on-failure
- any
delay:
description:
- Delay between restarts.
          - "Accepts a string in a format that looks like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--restart-delay) option of C(docker service create).
type: str
max_attempts:
description:
- Maximum number of service restarts.
          - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
type: int
window:
description:
- Restart policy evaluation window.
- "Accepts a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--restart-window) option of C(docker service create).
type: str
type: dict
version_added: "2.8"
restart_policy:
description:
- Restart condition of the service.
- Corresponds to the C(--restart-condition) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(restart_config.condition) instead.
type: str
choices:
- none
- on-failure
- any
restart_policy_attempts:
description:
- Maximum number of service restarts.
      - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(restart_config.max_attempts) instead.
type: int
restart_policy_delay:
description:
- Delay between restarts.
- "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--restart-delay) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(restart_config.delay) instead.
type: raw
restart_policy_window:
description:
- Restart policy evaluation window.
- "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--restart-window) option of C(docker service create).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(restart_config.window) instead.
type: raw
rollback_config:
description:
- Configures how the service should be rolled back in case of a failing update.
suboptions:
parallelism:
description:
- The number of containers to rollback at a time. If set to 0, all containers rollback simultaneously.
- Corresponds to the C(--rollback-parallelism) option of C(docker service create).
- Requires API version >= 1.28.
type: int
delay:
description:
- Delay between task rollbacks.
- "Accepts a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--rollback-delay) option of C(docker service create).
- Requires API version >= 1.28.
type: str
failure_action:
description:
- Action to take in case of rollback failure.
- Corresponds to the C(--rollback-failure-action) option of C(docker service create).
- Requires API version >= 1.28.
type: str
choices:
- continue
- pause
monitor:
description:
- Duration after each task rollback to monitor for failure.
- "Accepts a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--rollback-monitor) option of C(docker service create).
- Requires API version >= 1.28.
type: str
max_failure_ratio:
description:
- Fraction of tasks that may fail during a rollback.
- Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
- Requires API version >= 1.28.
type: float
order:
description:
- Specifies the order of operations during rollbacks.
- Corresponds to the C(--rollback-order) option of C(docker service create).
- Requires API version >= 1.29.
type: str
choices:
- stop-first
- start-first
type: dict
version_added: "2.8"
secrets:
description:
- List of dictionaries describing the service secrets.
- Corresponds to the C(--secret) option of C(docker service create).
- Requires API version >= 1.25.
type: list
suboptions:
secret_id:
description:
- Secret's ID.
type: str
required: yes
secret_name:
description:
- Secret's name as defined at its creation.
type: str
required: yes
filename:
description:
- Name of the file containing the secret. Defaults to the I(secret_name) if not specified.
type: str
uid:
description:
- UID of the secret file's owner.
type: str
gid:
description:
- GID of the secret file's group.
type: str
mode:
description:
- File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
type: int
state:
description:
- I(absent) - A service matching the specified name will be removed and have its tasks stopped.
- I(present) - Asserts the existence of a service matching the name and provided configuration parameters.
Unspecified configuration parameters will be set to docker defaults.
type: str
required: yes
default: present
choices:
- present
- absent
stop_grace_period:
description:
- Time to wait before force killing a container.
- "Accepts a duration as a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--stop-grace-period) option of C(docker service create).
type: str
version_added: "2.8"
stop_signal:
description:
- Override default signal used to stop the container.
- Corresponds to the C(--stop-signal) option of C(docker service create).
type: str
version_added: "2.8"
tty:
description:
- Allocate a pseudo-TTY.
- Corresponds to the C(--tty) option of C(docker service create).
- Requires API version >= 1.25.
type: bool
update_config:
description:
- Configures how the service should be updated. Useful for configuring rolling updates.
suboptions:
parallelism:
description:
- Rolling update parallelism.
- Corresponds to the C(--update-parallelism) option of C(docker service create).
type: int
delay:
description:
- Rolling update delay.
- "Accepts a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--update-delay) option of C(docker service create).
type: str
failure_action:
description:
- Action to take in case of container failure.
- Corresponds to the C(--update-failure-action) option of C(docker service create).
- Usage of I(rollback) requires API version >= 1.29.
type: str
choices:
- continue
- pause
- rollback
monitor:
description:
- Time to monitor updated tasks for failures.
- "Accepts a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--update-monitor) option of C(docker service create).
- Requires API version >= 1.25.
type: str
max_failure_ratio:
description:
- Fraction of tasks that may fail during an update before the failure action is invoked.
- Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
- Requires API version >= 1.25.
type: float
order:
description:
- Specifies the order of operations when rolling out an updated task.
- Corresponds to the C(--update-order) option of C(docker service create).
- Requires API version >= 1.29.
type: str
choices:
- stop-first
- start-first
type: dict
version_added: "2.8"
update_delay:
description:
- Rolling update delay.
- "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--update-delay) option of C(docker service create).
- Before Ansible 2.8, the default value for this option was C(10).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.delay) instead.
type: raw
update_parallelism:
description:
- Rolling update parallelism.
- Corresponds to the C(--update-parallelism) option of C(docker service create).
- Before Ansible 2.8, the default value for this option was C(1).
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.parallelism) instead.
type: int
update_failure_action:
description:
- Action to take in case of container failure.
- Corresponds to the C(--update-failure-action) option of C(docker service create).
- Usage of I(rollback) requires API version >= 1.29.
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.failure_action) instead.
type: str
choices:
- continue
- pause
- rollback
update_monitor:
description:
- Time to monitor updated tasks for failures.
- "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
- Corresponds to the C(--update-monitor) option of C(docker service create).
- Requires API version >= 1.25.
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.monitor) instead.
type: raw
update_max_failure_ratio:
description:
- Fraction of tasks that may fail during an update before the failure action is invoked.
- Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
- Requires API version >= 1.25.
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.max_failure_ratio) instead.
type: float
update_order:
description:
- Specifies the order of operations when rolling out an updated task.
- Corresponds to the C(--update-order) option of C(docker service create).
- Requires API version >= 1.29.
- Deprecated in 2.8, will be removed in 2.12. Use parameter C(update_config.order) instead.
type: str
choices:
- stop-first
- start-first
user:
description:
- Sets the username or UID used for the specified command.
- Before Ansible 2.8, the default value for this option was C(root).
- The default has been removed so that the user defined in the image is used if no user is specified here.
- Corresponds to the C(--user) option of C(docker service create).
type: str
working_dir:
description:
- Path to the working directory.
- Corresponds to the C(--workdir) option of C(docker service create).
type: str
version_added: "2.8"
extends_documentation_fragment:
- docker
- docker.docker_py_2_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
- "Docker API >= 1.24"
notes:
- "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0.
When using older versions use C(force_update: true) to trigger the swarm to resolve a new image."
'''
RETURN = '''
swarm_service:
returned: always
type: dict
description:
- Dictionary of variables representing the current state of the service.
Matches the module parameters format.
- Note that facts are not part of registered vars but accessible directly.
- Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service),
while the module actually returned a variable called C(ansible_docker_service). The variable
was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0.
In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used.
sample: '{
"args": [
"3600"
],
"command": [
"sleep"
],
"configs": null,
"constraints": [
"node.role == manager",
"engine.labels.operatingsystem == ubuntu 14.04"
],
"container_labels": null,
"dns": null,
"dns_options": null,
"dns_search": null,
"endpoint_mode": null,
"env": [
"ENVVAR1=envvar1",
"ENVVAR2=envvar2"
],
"force_update": null,
"groups": null,
"healthcheck": {
"interval": 90000000000,
"retries": 3,
"start_period": 30000000000,
"test": [
"CMD",
"curl",
"--fail",
"http://nginx.host.com"
],
"timeout": 10000000000
},
"healthcheck_disabled": false,
"hostname": null,
"hosts": null,
"image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8",
"labels": {
"com.example.department": "Finance",
"com.example.description": "Accounting webapp"
},
"limit_cpu": 0.5,
"limit_memory": 52428800,
"log_driver": "fluentd",
"log_driver_options": {
"fluentd-address": "127.0.0.1:24224",
"fluentd-async-connect": "true",
"tag": "myservice"
},
"mode": "replicated",
"mounts": [
{
"readonly": false,
"source": "/tmp/",
"target": "/remote_tmp/",
"type": "bind",
"labels": null,
"propagation": null,
"no_copy": null,
"driver_config": null,
"tmpfs_size": null,
"tmpfs_mode": null
}
],
"networks": null,
"placement_preferences": [
{
"spread": "node.labels.mylabel"
}
],
"publish": null,
"read_only": null,
"replicas": 1,
"reserve_cpu": 0.25,
"reserve_memory": 20971520,
"restart_policy": "on-failure",
"restart_policy_attempts": 3,
"restart_policy_delay": 5000000000,
"restart_policy_window": 120000000000,
"secrets": null,
"stop_grace_period": null,
"stop_signal": null,
"tty": null,
"update_delay": 10000000000,
"update_failure_action": null,
"update_max_failure_ratio": null,
"update_monitor": null,
"update_order": "stop-first",
"update_parallelism": 2,
"user": null,
"working_dir": null
}'
changes:
returned: always
description:
- List of changed service attributes if a service has been altered, [] otherwise.
type: list
sample: ['container_labels', 'replicas']
rebuilt:
returned: always
description:
- True if the service has been recreated (removed and created)
type: bool
sample: True
'''
EXAMPLES = '''
- name: Set command and arguments
docker_swarm_service:
name: myservice
image: alpine
command: sleep
args:
- "3600"
- name: Set a bind mount
docker_swarm_service:
name: myservice
image: alpine
mounts:
- source: /tmp/
target: /remote_tmp/
type: bind
- name: Set service labels
docker_swarm_service:
name: myservice
image: alpine
labels:
com.example.description: "Accounting webapp"
com.example.department: "Finance"
- name: Set environment variables
docker_swarm_service:
name: myservice
image: alpine
env:
ENVVAR1: envvar1
ENVVAR2: envvar2
env_files:
- envs/common.env
- envs/apps/web.env
- name: Set fluentd logging
docker_swarm_service:
name: myservice
image: alpine
logging:
driver: fluentd
options:
fluentd-address: "127.0.0.1:24224"
fluentd-async-connect: "true"
tag: myservice
- name: Set restart policies
docker_swarm_service:
name: myservice
image: alpine
restart_config:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
- name: Set update config
docker_swarm_service:
name: myservice
image: alpine
update_config:
parallelism: 2
delay: 10s
order: stop-first
- name: Set rollback config
docker_swarm_service:
name: myservice
image: alpine
update_config:
failure_action: rollback
rollback_config:
parallelism: 2
delay: 10s
order: stop-first
- name: Set placement preferences
docker_swarm_service:
name: myservice
image: alpine:edge
placement:
preferences:
- spread: node.labels.mylabel
constraints:
- node.role == manager
- engine.labels.operatingsystem == ubuntu 14.04
- name: Set configs
docker_swarm_service:
name: myservice
image: alpine:edge
configs:
- config_id: myconfig_id
config_name: myconfig_name
filename: "/tmp/config.txt"
- name: Set networks
docker_swarm_service:
name: myservice
image: alpine:edge
networks:
- mynetwork
- name: Set secrets
docker_swarm_service:
name: myservice
image: alpine:edge
secrets:
- secret_id: mysecret_id
secret_name: mysecret_name
filename: "/run/secrets/secret.txt"
- name: Start service with healthcheck
docker_swarm_service:
name: myservice
image: nginx:1.13
healthcheck:
# Check if nginx server is healthy by curl'ing the server.
# If this fails or timeouts, the healthcheck fails.
test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
interval: 1m30s
timeout: 10s
retries: 3
start_period: 30s
- name: Configure service resources
docker_swarm_service:
name: myservice
image: alpine:edge
reservations:
cpus: 0.25
memory: 20M
limits:
cpus: 0.50
memory: 50M
- name: Remove service
docker_swarm_service:
name: myservice
state: absent
'''
import shlex
import time
import operator
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
DifferenceTracker,
DockerBaseClass,
convert_duration_to_nanosecond,
parse_healthcheck,
RequestException,
)
from ansible.module_utils.basic import human_to_bytes
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
try:
from docker import types
from docker.utils import (
parse_repository_tag,
parse_env_file,
format_environment,
)
from docker.errors import (
APIError,
DockerException,
NotFound,
)
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
def get_docker_environment(env, env_files):
    """
    Will return a list of "KEY=VALUE" items. Supplied env variable can
    be either a list or a dictionary.
    If environment files are combined with explicit environment variables,
    the explicit environment variables take precedence.
    """
    merged = {}
    # Environment files are applied first so explicit variables can override.
    for env_file in (env_files or []):
        for name, value in parse_env_file(env_file).items():
            merged[name] = str(value)
    if isinstance(env, string_types):
        # Accept a comma-separated string as a shorthand for a list.
        env = env.split(',')
    if isinstance(env, dict):
        for name, value in env.items():
            if not isinstance(value, string_types):
                raise ValueError(
                    'Non-string value found for env option. '
                    'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name
                )
            merged[name] = str(value)
    elif isinstance(env, list):
        for entry in env:
            name, sep, value = entry.partition('=')
            if not sep:
                raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.')
            merged[name] = value
    elif env is not None:
        raise ValueError(
            'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env))
        )
    formatted = format_environment(merged)
    if formatted:
        return sorted(formatted)
    # Distinguish "explicitly requested but empty" ([]) from "not requested" (None).
    if env is not None or env_files is not None:
        return []
    return None
def get_nanoseconds_from_raw_option(name, value):
    """Normalize a duration option to nanoseconds.

    Integers are passed through unchanged; strings are parsed either as a
    plain integer or as a duration expression (e.g. "10s"). ``None`` stays
    ``None``; any other type raises ValueError.
    """
    if value is None:
        return None
    if isinstance(value, int):
        return value
    if isinstance(value, string_types):
        try:
            return int(value)
        except ValueError:
            # Not a bare integer; interpret as a human-readable duration.
            return convert_duration_to_nanosecond(value)
    raise ValueError(
        'Invalid type for %s %s (%s). Only string or int allowed.'
        % (name, value, type(value))
    )
def get_value(key, values, default=None):
    """Return ``values[key]`` unless it is missing or None, else ``default``."""
    found = values.get(key)
    if found is None:
        return default
    return found
def has_dict_changed(new_dict, old_dict):
    """
    Check if new_dict has differences compared to old_dict while
    ignoring keys in old_dict which are None in new_dict.
    """
    if new_dict is None:
        # Option not specified at all: never a difference.
        return False
    # One side empty while the other is not counts as a change.
    if bool(new_dict) != bool(old_dict):
        return True
    # Only compare options the caller actually set (non-None values).
    return any(
        value != old_dict.get(option)
        for option, value in new_dict.items()
        if value is not None
    )
def has_list_of_dicts_changed(new_list, old_list):
    """
    Check two lists of dicts has differences.
    """
    if new_list is None:
        # Option not specified: never a difference.
        return False
    old_list = old_list or []
    if len(new_list) != len(old_list):
        return True
    # Pairwise comparison; ordering is significant by design.
    return any(
        has_dict_changed(new_item, old_item)
        for new_item, old_item in zip(new_list, old_list)
    )
class DockerService(DockerBaseClass):
def __init__(self):
super(DockerService, self).__init__()
self.image = ""
self.command = None
self.args = None
self.endpoint_mode = None
self.dns = None
self.healthcheck = None
self.healthcheck_disabled = None
self.hostname = None
self.hosts = None
self.tty = None
self.dns_search = None
self.dns_options = None
self.env = None
self.force_update = None
self.groups = None
self.log_driver = None
self.log_driver_options = None
self.labels = None
self.container_labels = None
self.limit_cpu = None
self.limit_memory = None
self.reserve_cpu = None
self.reserve_memory = None
self.mode = "replicated"
self.user = None
self.mounts = None
self.configs = None
self.secrets = None
self.constraints = None
self.networks = None
self.stop_grace_period = None
self.stop_signal = None
self.publish = None
self.placement_preferences = None
self.replicas = -1
self.service_id = False
self.service_version = False
self.read_only = None
self.restart_policy = None
self.restart_policy_attempts = None
self.restart_policy_delay = None
self.restart_policy_window = None
self.rollback_config = None
self.update_delay = None
self.update_parallelism = None
self.update_failure_action = None
self.update_monitor = None
self.update_max_failure_ratio = None
self.update_order = None
self.working_dir = None
self.can_update_networks = None
def get_facts(self):
return {
'image': self.image,
'mounts': self.mounts,
'configs': self.configs,
'networks': self.networks,
'command': self.command,
'args': self.args,
'tty': self.tty,
'dns': self.dns,
'dns_search': self.dns_search,
'dns_options': self.dns_options,
'healthcheck': self.healthcheck,
'healthcheck_disabled': self.healthcheck_disabled,
'hostname': self.hostname,
'hosts': self.hosts,
'env': self.env,
'force_update': self.force_update,
'groups': self.groups,
'log_driver': self.log_driver,
'log_driver_options': self.log_driver_options,
'publish': self.publish,
'constraints': self.constraints,
'placement_preferences': self.placement_preferences,
'labels': self.labels,
'container_labels': self.container_labels,
'mode': self.mode,
'replicas': self.replicas,
'endpoint_mode': self.endpoint_mode,
'restart_policy': self.restart_policy,
'secrets': self.secrets,
'stop_grace_period': self.stop_grace_period,
'stop_signal': self.stop_signal,
'limit_cpu': self.limit_cpu,
'limit_memory': self.limit_memory,
'read_only': self.read_only,
'reserve_cpu': self.reserve_cpu,
'reserve_memory': self.reserve_memory,
'restart_policy_delay': self.restart_policy_delay,
'restart_policy_attempts': self.restart_policy_attempts,
'restart_policy_window': self.restart_policy_window,
'rollback_config': self.rollback_config,
'update_delay': self.update_delay,
'update_parallelism': self.update_parallelism,
'update_failure_action': self.update_failure_action,
'update_monitor': self.update_monitor,
'update_max_failure_ratio': self.update_max_failure_ratio,
'update_order': self.update_order,
'user': self.user,
'working_dir': self.working_dir,
}
@staticmethod
def get_restart_config_from_ansible_params(params):
restart_config = params['restart_config'] or {}
condition = get_value(
'condition',
restart_config,
default=params['restart_policy']
)
delay = get_value(
'delay',
restart_config,
default=params['restart_policy_delay']
)
delay = get_nanoseconds_from_raw_option(
'restart_policy_delay',
delay
)
max_attempts = get_value(
'max_attempts',
restart_config,
default=params['restart_policy_attempts']
)
window = get_value(
'window',
restart_config,
default=params['restart_policy_window']
)
window = get_nanoseconds_from_raw_option(
'restart_policy_window',
window
)
return {
'restart_policy': condition,
'restart_policy_delay': delay,
'restart_policy_attempts': max_attempts,
'restart_policy_window': window
}
@staticmethod
def get_update_config_from_ansible_params(params):
update_config = params['update_config'] or {}
parallelism = get_value(
'parallelism',
update_config,
default=params['update_parallelism']
)
delay = get_value(
'delay',
update_config,
default=params['update_delay']
)
delay = get_nanoseconds_from_raw_option(
'update_delay',
delay
)
failure_action = get_value(
'failure_action',
update_config,
default=params['update_failure_action']
)
monitor = get_value(
'monitor',
update_config,
default=params['update_monitor']
)
monitor = get_nanoseconds_from_raw_option(
'update_monitor',
monitor
)
max_failure_ratio = get_value(
'max_failure_ratio',
update_config,
default=params['update_max_failure_ratio']
)
order = get_value(
'order',
update_config,
default=params['update_order']
)
return {
'update_parallelism': parallelism,
'update_delay': delay,
'update_failure_action': failure_action,
'update_monitor': monitor,
'update_max_failure_ratio': max_failure_ratio,
'update_order': order
}
@staticmethod
def get_rollback_config_from_ansible_params(params):
if params['rollback_config'] is None:
return None
rollback_config = params['rollback_config'] or {}
delay = get_nanoseconds_from_raw_option(
'rollback_config.delay',
rollback_config.get('delay')
)
monitor = get_nanoseconds_from_raw_option(
'rollback_config.monitor',
rollback_config.get('monitor')
)
return {
'parallelism': rollback_config.get('parallelism'),
'delay': delay,
'failure_action': rollback_config.get('failure_action'),
'monitor': monitor,
'max_failure_ratio': rollback_config.get('max_failure_ratio'),
'order': rollback_config.get('order'),
}
@staticmethod
def get_logging_from_ansible_params(params):
logging_config = params['logging'] or {}
driver = get_value(
'driver',
logging_config,
default=params['log_driver']
)
options = get_value(
'options',
logging_config,
default=params['log_driver_options']
)
return {
'log_driver': driver,
'log_driver_options': options,
}
@staticmethod
def get_limits_from_ansible_params(params):
limits = params['limits'] or {}
cpus = get_value(
'cpus',
limits,
default=params['limit_cpu']
)
memory = get_value(
'memory',
limits,
default=params['limit_memory']
)
if memory is not None:
try:
memory = human_to_bytes(memory)
except ValueError as exc:
raise Exception('Failed to convert limit_memory to bytes: %s' % exc)
return {
'limit_cpu': cpus,
'limit_memory': memory,
}
@staticmethod
def get_reservations_from_ansible_params(params):
reservations = params['reservations'] or {}
cpus = get_value(
'cpus',
reservations,
default=params['reserve_cpu']
)
memory = get_value(
'memory',
reservations,
default=params['reserve_memory']
)
if memory is not None:
try:
memory = human_to_bytes(memory)
except ValueError as exc:
raise Exception('Failed to convert reserve_memory to bytes: %s' % exc)
return {
'reserve_cpu': cpus,
'reserve_memory': memory,
}
@staticmethod
def get_placement_from_ansible_params(params):
placement = params['placement'] or {}
constraints = get_value(
'constraints',
placement,
default=params['constraints']
)
preferences = placement.get('preferences')
return {
'constraints': constraints,
'placement_preferences': preferences,
}
    @classmethod
    def from_ansible_params(cls, ap, old_service, image_digest, can_update_networks):
        """Build a DockerService from the Ansible module parameters ``ap``.

        :param ap: module params dict.
        :param old_service: the currently deployed DockerService (or None);
            only used to inherit the replica count when ``replicas`` is -1.
        :param image_digest: resolved image reference to deploy.
        :param can_update_networks: whether the daemon supports updating
            networks in place (stored for use by ``compare``).
        :raises Exception/ValueError: on malformed ``command``, ``env`` or
            size/duration options.
        """
        s = DockerService()
        s.image = image_digest
        s.can_update_networks = can_update_networks
        s.args = ap['args']
        s.endpoint_mode = ap['endpoint_mode']
        s.dns = ap['dns']
        s.dns_search = ap['dns_search']
        s.dns_options = ap['dns_options']
        s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck'])
        s.hostname = ap['hostname']
        s.hosts = ap['hosts']
        s.tty = ap['tty']
        s.labels = ap['labels']
        s.container_labels = ap['container_labels']
        s.mode = ap['mode']
        s.networks = ap['networks']
        s.stop_signal = ap['stop_signal']
        s.user = ap['user']
        s.working_dir = ap['working_dir']
        s.read_only = ap['read_only']
        # A command given as a single string is tokenized shell-style; a list
        # must already contain only strings.
        s.command = ap['command']
        if isinstance(s.command, string_types):
            s.command = shlex.split(s.command)
        elif isinstance(s.command, list):
            invalid_items = [
                (index, item)
                for index, item in enumerate(s.command)
                if not isinstance(item, string_types)
            ]
            if invalid_items:
                errors = ', '.join(
                    [
                        '%s (%s) at index %s' % (item, type(item), index)
                        for index, item in invalid_items
                    ]
                )
                raise Exception(
                    'All items in a command list need to be strings. '
                    'Check quoting. Invalid items: %s.'
                    % errors
                )
            s.command = ap['command']
        elif s.command is not None:
            raise ValueError(
                'Invalid type for command %s (%s). '
                'Only string or list allowed. Check quoting.'
                % (s.command, type(s.command))
            )
        s.env = get_docker_environment(ap['env'], ap['env_files'])
        s.rollback_config = cls.get_rollback_config_from_ansible_params(ap)
        # The extractors below each return a dict of attribute-name -> value
        # that is copied straight onto the service object.
        update_config = cls.get_update_config_from_ansible_params(ap)
        for key, value in update_config.items():
            setattr(s, key, value)
        restart_config = cls.get_restart_config_from_ansible_params(ap)
        for key, value in restart_config.items():
            setattr(s, key, value)
        logging_config = cls.get_logging_from_ansible_params(ap)
        for key, value in logging_config.items():
            setattr(s, key, value)
        limits = cls.get_limits_from_ansible_params(ap)
        for key, value in limits.items():
            setattr(s, key, value)
        reservations = cls.get_reservations_from_ansible_params(ap)
        for key, value in reservations.items():
            setattr(s, key, value)
        placement = cls.get_placement_from_ansible_params(ap)
        for key, value in placement.items():
            setattr(s, key, value)
        if ap['stop_grace_period'] is not None:
            s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period'])
        if ap['force_update']:
            # A fresh timestamp-derived integer forces the swarm to redeploy
            # tasks even when nothing else changed.
            s.force_update = int(str(time.time()).replace('.', ''))
        if ap['groups'] is not None:
            # In case integers are passed as groups, we need to convert them to
            # strings as docker internally treats them as strings.
            s.groups = [str(g) for g in ap['groups']]
        if ap['replicas'] == -1:
            # -1 means "unspecified": inherit from the deployed service,
            # or default to 1 for a brand new service.
            if old_service:
                s.replicas = old_service.replicas
            else:
                s.replicas = 1
        else:
            s.replicas = ap['replicas']
        if ap['publish'] is not None:
            s.publish = []
            for param_p in ap['publish']:
                service_p = {}
                service_p['protocol'] = param_p['protocol']
                service_p['mode'] = param_p['mode']
                service_p['published_port'] = param_p['published_port']
                service_p['target_port'] = param_p['target_port']
                s.publish.append(service_p)
        if ap['mounts'] is not None:
            s.mounts = []
            for param_m in ap['mounts']:
                service_m = {}
                service_m['readonly'] = param_m['readonly']
                service_m['type'] = param_m['type']
                service_m['source'] = param_m['source']
                service_m['target'] = param_m['target']
                service_m['labels'] = param_m['labels']
                service_m['no_copy'] = param_m['no_copy']
                service_m['propagation'] = param_m['propagation']
                service_m['driver_config'] = param_m['driver_config']
                service_m['tmpfs_mode'] = param_m['tmpfs_mode']
                tmpfs_size = param_m['tmpfs_size']
                if tmpfs_size is not None:
                    try:
                        tmpfs_size = human_to_bytes(tmpfs_size)
                    except ValueError as exc:
                        raise ValueError(
                            'Failed to convert tmpfs_size to bytes: %s' % exc
                        )
                service_m['tmpfs_size'] = tmpfs_size
                s.mounts.append(service_m)
        if ap['configs'] is not None:
            s.configs = []
            for param_m in ap['configs']:
                service_c = {}
                service_c['config_id'] = param_m['config_id']
                service_c['config_name'] = param_m['config_name']
                # The config/secret name doubles as the in-container filename
                # when no explicit filename was given.
                service_c['filename'] = param_m['filename'] or service_c['config_name']
                service_c['uid'] = param_m['uid']
                service_c['gid'] = param_m['gid']
                service_c['mode'] = param_m['mode']
                s.configs.append(service_c)
        if ap['secrets'] is not None:
            s.secrets = []
            for param_m in ap['secrets']:
                service_s = {}
                service_s['secret_id'] = param_m['secret_id']
                service_s['secret_name'] = param_m['secret_name']
                service_s['filename'] = param_m['filename'] or service_s['secret_name']
                service_s['uid'] = param_m['uid']
                service_s['gid'] = param_m['gid']
                service_s['mode'] = param_m['mode']
                s.secrets.append(service_s)
        return s
def compare(self, os):
differences = DifferenceTracker()
needs_rebuild = False
force_update = False
if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode:
differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode)
if self.env is not None and self.env != (os.env or []):
differences.add('env', parameter=self.env, active=os.env)
if self.log_driver is not None and self.log_driver != os.log_driver:
differences.add('log_driver', parameter=self.log_driver, active=os.log_driver)
if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}):
differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options)
if self.mode != os.mode:
needs_rebuild = True
differences.add('mode', parameter=self.mode, active=os.mode)
if has_list_of_dicts_changed(self.mounts, os.mounts):
differences.add('mounts', parameter=self.mounts, active=os.mounts)
if has_list_of_dicts_changed(self.configs, os.configs):
differences.add('configs', parameter=self.configs, active=os.configs)
if has_list_of_dicts_changed(self.secrets, os.secrets):
differences.add('secrets', parameter=self.secrets, active=os.secrets)
if self.networks is not None and self.networks != (os.networks or []):
differences.add('networks', parameter=self.networks, active=os.networks)
needs_rebuild = not self.can_update_networks
if self.replicas != os.replicas:
differences.add('replicas', parameter=self.replicas, active=os.replicas)
if self.command is not None and self.command != (os.command or []):
differences.add('command', parameter=self.command, active=os.command)
if self.args is not None and self.args != (os.args or []):
differences.add('args', parameter=self.args, active=os.args)
if self.constraints is not None and self.constraints != (os.constraints or []):
differences.add('constraints', parameter=self.constraints, active=os.constraints)
if self.placement_preferences is not None and self.placement_preferences != (os.placement_preferences or []):
differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences)
if self.groups is not None and self.groups != (os.groups or []):
differences.add('groups', parameter=self.groups, active=os.groups)
if self.labels is not None and self.labels != (os.labels or {}):
differences.add('labels', parameter=self.labels, active=os.labels)
if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu:
differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu)
if self.limit_memory is not None and self.limit_memory != os.limit_memory:
differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory)
if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu:
differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu)
if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory:
differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory)
if self.container_labels is not None and self.container_labels != (os.container_labels or {}):
differences.add('container_labels', parameter=self.container_labels, active=os.container_labels)
if self.stop_signal is not None and self.stop_signal != os.stop_signal:
differences.add('stop_signal', parameter=self.stop_signal, active=os.stop_signal)
if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period:
differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period)
if self.has_publish_changed(os.publish):
differences.add('publish', parameter=self.publish, active=os.publish)
if self.read_only is not None and self.read_only != os.read_only:
differences.add('read_only', parameter=self.read_only, active=os.read_only)
if self.restart_policy is not None and self.restart_policy != os.restart_policy:
differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy)
if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts:
differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts)
if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay:
differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay)
if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window:
differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window)
if has_dict_changed(self.rollback_config, os.rollback_config):
differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config)
if self.update_delay is not None and self.update_delay != os.update_delay:
differences.add('update_delay', parameter=self.update_delay, active=os.update_delay)
if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism:
differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism)
if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action:
differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action)
if self.update_monitor is not None and self.update_monitor != os.update_monitor:
differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor)
if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio:
differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio)
if self.update_order is not None and self.update_order != os.update_order:
differences.add('update_order', parameter=self.update_order, active=os.update_order)
has_image_changed, change = self.has_image_changed(os.image)
if has_image_changed:
differences.add('image', parameter=self.image, active=change)
if self.user and self.user != os.user:
differences.add('user', parameter=self.user, active=os.user)
if self.dns is not None and self.dns != (os.dns or []):
differences.add('dns', parameter=self.dns, active=os.dns)
if self.dns_search is not None and self.dns_search != (os.dns_search or []):
differences.add('dns_search', parameter=self.dns_search, active=os.dns_search)
if self.dns_options is not None and self.dns_options != (os.dns_options or []):
differences.add('dns_options', parameter=self.dns_options, active=os.dns_options)
if self.has_healthcheck_changed(os):
differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck)
if self.hostname is not None and self.hostname != os.hostname:
differences.add('hostname', parameter=self.hostname, active=os.hostname)
if self.hosts is not None and self.hosts != (os.hosts or {}):
differences.add('hosts', parameter=self.hosts, active=os.hosts)
if self.tty is not None and self.tty != os.tty:
differences.add('tty', parameter=self.tty, active=os.tty)
if self.working_dir is not None and self.working_dir != os.working_dir:
differences.add('working_dir', parameter=self.working_dir, active=os.working_dir)
if self.force_update:
force_update = True
return not differences.empty or force_update, differences, needs_rebuild, force_update
def has_healthcheck_changed(self, old_publish):
if self.healthcheck_disabled is False and self.healthcheck is None:
return False
if self.healthcheck_disabled and old_publish.healthcheck is None:
return False
return self.healthcheck != old_publish.healthcheck
def has_publish_changed(self, old_publish):
if self.publish is None:
return False
old_publish = old_publish or []
if len(self.publish) != len(old_publish):
return True
publish_sorter = operator.itemgetter('published_port', 'target_port', 'protocol')
publish = sorted(self.publish, key=publish_sorter)
old_publish = sorted(old_publish, key=publish_sorter)
for publish_item, old_publish_item in zip(publish, old_publish):
ignored_keys = set()
if not publish_item.get('mode'):
ignored_keys.add('mode')
# Create copies of publish_item dicts where keys specified in ignored_keys are left out
filtered_old_publish_item = dict(
(k, v) for k, v in old_publish_item.items() if k not in ignored_keys
)
filtered_publish_item = dict(
(k, v) for k, v in publish_item.items() if k not in ignored_keys
)
if filtered_publish_item != filtered_old_publish_item:
return True
return False
def has_image_changed(self, old_image):
if '@' not in self.image:
old_image = old_image.split('@')[0]
return self.image != old_image, old_image
def __str__(self):
return str({
'mode': self.mode,
'env': self.env,
'endpoint_mode': self.endpoint_mode,
'mounts': self.mounts,
'configs': self.configs,
'secrets': self.secrets,
'networks': self.networks,
'replicas': self.replicas
})
    def build_container_spec(self):
        """Translate this service's options into a docker ``types.ContainerSpec``.

        Only options that were actually specified (non-None) are passed on,
        so the daemon's defaults apply for everything else.
        """
        # Mounts: map our option names onto the SDK's Mount keyword names.
        mounts = None
        if self.mounts is not None:
            mounts = []
            for mount_config in self.mounts:
                mount_options = {
                    'target': 'target',
                    'source': 'source',
                    'type': 'type',
                    'readonly': 'read_only',
                    'propagation': 'propagation',
                    'labels': 'labels',
                    'no_copy': 'no_copy',
                    'driver_config': 'driver_config',
                    'tmpfs_size': 'tmpfs_size',
                    'tmpfs_mode': 'tmpfs_mode'
                }
                mount_args = {}
                for option, mount_arg in mount_options.items():
                    value = mount_config.get(option)
                    if value is not None:
                        mount_args[mount_arg] = value
                mounts.append(types.Mount(**mount_args))
        # Configs: only id and name are mandatory; the rest is optional.
        # NOTE(review): the truthiness checks below skip falsy values such as
        # uid/gid/mode 0 — presumably intentional, but worth confirming.
        configs = None
        if self.configs is not None:
            configs = []
            for config_config in self.configs:
                config_args = {
                    'config_id': config_config['config_id'],
                    'config_name': config_config['config_name']
                }
                filename = config_config.get('filename')
                if filename:
                    config_args['filename'] = filename
                uid = config_config.get('uid')
                if uid:
                    config_args['uid'] = uid
                gid = config_config.get('gid')
                if gid:
                    config_args['gid'] = gid
                mode = config_config.get('mode')
                if mode:
                    config_args['mode'] = mode
                configs.append(types.ConfigReference(**config_args))
        # Secrets: same structure as configs.
        secrets = None
        if self.secrets is not None:
            secrets = []
            for secret_config in self.secrets:
                secret_args = {
                    'secret_id': secret_config['secret_id'],
                    'secret_name': secret_config['secret_name']
                }
                filename = secret_config.get('filename')
                if filename:
                    secret_args['filename'] = filename
                uid = secret_config.get('uid')
                if uid:
                    secret_args['uid'] = uid
                gid = secret_config.get('gid')
                if gid:
                    secret_args['gid'] = gid
                mode = secret_config.get('mode')
                if mode:
                    secret_args['mode'] = mode
                secrets.append(types.SecretReference(**secret_args))
        # DNS settings are grouped into a single DNSConfig object.
        dns_config_args = {}
        if self.dns is not None:
            dns_config_args['nameservers'] = self.dns
        if self.dns_search is not None:
            dns_config_args['search'] = self.dns_search
        if self.dns_options is not None:
            dns_config_args['options'] = self.dns_options
        dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None
        # Assemble the ContainerSpec kwargs from all remaining options.
        container_spec_args = {}
        if self.command is not None:
            container_spec_args['command'] = self.command
        if self.args is not None:
            container_spec_args['args'] = self.args
        if self.env is not None:
            container_spec_args['env'] = self.env
        if self.user is not None:
            container_spec_args['user'] = self.user
        if self.container_labels is not None:
            container_spec_args['labels'] = self.container_labels
        if self.healthcheck is not None:
            container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck)
        if self.hostname is not None:
            container_spec_args['hostname'] = self.hostname
        if self.hosts is not None:
            container_spec_args['hosts'] = self.hosts
        if self.read_only is not None:
            container_spec_args['read_only'] = self.read_only
        if self.stop_grace_period is not None:
            container_spec_args['stop_grace_period'] = self.stop_grace_period
        if self.stop_signal is not None:
            container_spec_args['stop_signal'] = self.stop_signal
        if self.tty is not None:
            container_spec_args['tty'] = self.tty
        if self.groups is not None:
            container_spec_args['groups'] = self.groups
        if self.working_dir is not None:
            # The SDK uses 'workdir' as the keyword for the working directory.
            container_spec_args['workdir'] = self.working_dir
        if secrets is not None:
            container_spec_args['secrets'] = secrets
        if mounts is not None:
            container_spec_args['mounts'] = mounts
        if dns_config is not None:
            container_spec_args['dns_config'] = dns_config
        if configs is not None:
            container_spec_args['configs'] = configs
        return types.ContainerSpec(self.image, **container_spec_args)
def build_placement(self):
placement_args = {}
if self.constraints is not None:
placement_args['constraints'] = self.constraints
if self.placement_preferences is not None:
placement_args['preferences'] = [
{key.title(): {'SpreadDescriptor': value}}
for preference in self.placement_preferences
for key, value in preference.items()
]
return types.Placement(**placement_args) if placement_args else None
def build_update_config(self):
update_config_args = {}
if self.update_parallelism is not None:
update_config_args['parallelism'] = self.update_parallelism
if self.update_delay is not None:
update_config_args['delay'] = self.update_delay
if self.update_failure_action is not None:
update_config_args['failure_action'] = self.update_failure_action
if self.update_monitor is not None:
update_config_args['monitor'] = self.update_monitor
if self.update_max_failure_ratio is not None:
update_config_args['max_failure_ratio'] = self.update_max_failure_ratio
if self.update_order is not None:
update_config_args['order'] = self.update_order
return types.UpdateConfig(**update_config_args) if update_config_args else None
def build_log_driver(self):
log_driver_args = {}
if self.log_driver is not None:
log_driver_args['name'] = self.log_driver
if self.log_driver_options is not None:
log_driver_args['options'] = self.log_driver_options
return types.DriverConfig(**log_driver_args) if log_driver_args else None
def build_restart_policy(self):
restart_policy_args = {}
if self.restart_policy is not None:
restart_policy_args['condition'] = self.restart_policy
if self.restart_policy_delay is not None:
restart_policy_args['delay'] = self.restart_policy_delay
if self.restart_policy_attempts is not None:
restart_policy_args['max_attempts'] = self.restart_policy_attempts
if self.restart_policy_window is not None:
restart_policy_args['window'] = self.restart_policy_window
return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None
def build_rollback_config(self):
if self.rollback_config is None:
return None
rollback_config_options = [
'parallelism',
'delay',
'failure_action',
'monitor',
'max_failure_ratio',
'order',
]
rollback_config_args = {}
for option in rollback_config_options:
value = self.rollback_config.get(option)
if value is not None:
rollback_config_args[option] = value
return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None
def build_resources(self):
resources_args = {}
if self.limit_cpu is not None:
resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0)
if self.limit_memory is not None:
resources_args['mem_limit'] = self.limit_memory
if self.reserve_cpu is not None:
resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0)
if self.reserve_memory is not None:
resources_args['mem_reservation'] = self.reserve_memory
return types.Resources(**resources_args) if resources_args else None
def build_task_template(self, container_spec, placement=None):
log_driver = self.build_log_driver()
restart_policy = self.build_restart_policy()
resources = self.build_resources()
task_template_args = {}
if placement is not None:
task_template_args['placement'] = placement
if log_driver is not None:
task_template_args['log_driver'] = log_driver
if restart_policy is not None:
task_template_args['restart_policy'] = restart_policy
if resources is not None:
task_template_args['resources'] = resources
if self.force_update:
task_template_args['force_update'] = self.force_update
return types.TaskTemplate(container_spec=container_spec, **task_template_args)
def build_service_mode(self):
if self.mode == 'global':
self.replicas = None
return types.ServiceMode(self.mode, replicas=self.replicas)
def build_networks(self, docker_networks):
networks = None
if self.networks is not None:
networks = []
for network_name in self.networks:
network_id = None
try:
network_id = list(
filter(lambda n: n['name'] == network_name, docker_networks)
)[0]['id']
except (IndexError, KeyError):
pass
if network_id:
networks.append({'Target': network_id})
else:
raise Exception('no docker networks named: %s' % network_name)
return networks
def build_endpoint_spec(self):
endpoint_spec_args = {}
if self.publish is not None:
ports = {}
for port in self.publish:
if port.get('mode'):
ports[port['published_port']] = (
port['target_port'],
port['protocol'],
port['mode'],
)
else:
ports[port['published_port']] = (
port['target_port'],
port['protocol'],
)
endpoint_spec_args['ports'] = ports
if self.endpoint_mode is not None:
endpoint_spec_args['mode'] = self.endpoint_mode
return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None
def build_docker_service(self, docker_networks):
container_spec = self.build_container_spec()
placement = self.build_placement()
task_template = self.build_task_template(container_spec, placement)
update_config = self.build_update_config()
rollback_config = self.build_rollback_config()
service_mode = self.build_service_mode()
networks = self.build_networks(docker_networks)
endpoint_spec = self.build_endpoint_spec()
service = {'task_template': task_template, 'mode': service_mode}
if update_config:
service['update_config'] = update_config
if rollback_config:
service['rollback_config'] = rollback_config
if networks:
service['networks'] = networks
if endpoint_spec:
service['endpoint_spec'] = endpoint_spec
if self.labels:
service['labels'] = self.labels
return service
class DockerServiceManager(object):
    """Reads, diffs, creates, updates and removes a swarm service.

    Thin orchestration layer between the Ansible module parameters and the
    docker SDK: it maps raw inspect data onto `DockerService` objects,
    compares desired vs. current state, and applies the needed API calls.
    """
    def __init__(self, client):
        # client: AnsibleDockerClient wrapping both the docker SDK client
        # and the Ansible module (params, check_mode, fail, ...).
        self.client = client
        # How many times run() is retried on 'update out of sequence' races.
        self.retries = 2
        self.diff_tracker = None
    def get_networks_names_ids(self):
        """Return [{'name': ..., 'id': ...}, ...] for every daemon network."""
        return [{'name': n['Name'], 'id': n['Id']} for n in self.client.networks()]
    def get_service(self, name):
        """Inspect service `name` and map the raw API data onto a DockerService.

        Returns:
            A populated DockerService, or None when no such service exists.

        Raises:
            Exception: if the service reports an unknown mode.
        """
        try:
            raw_data = self.client.inspect_service(name)
        except NotFound:
            return None
        ds = DockerService()
        task_template_data = raw_data['Spec']['TaskTemplate']
        # Container-level settings.
        ds.image = task_template_data['ContainerSpec']['Image']
        ds.user = task_template_data['ContainerSpec'].get('User')
        ds.env = task_template_data['ContainerSpec'].get('Env')
        ds.command = task_template_data['ContainerSpec'].get('Command')
        ds.args = task_template_data['ContainerSpec'].get('Args')
        ds.groups = task_template_data['ContainerSpec'].get('Groups')
        ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod')
        ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal')
        ds.working_dir = task_template_data['ContainerSpec'].get('Dir')
        ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly')
        healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck')
        if healthcheck_data:
            # Keep only the options the module knows, lower-cased to match
            # the module's parameter names.
            options = ['test', 'interval', 'timeout', 'start_period', 'retries']
            healthcheck = dict(
                (key.lower(), value) for key, value in healthcheck_data.items()
                if value is not None and key.lower() in options
            )
            ds.healthcheck = healthcheck
        update_config_data = raw_data['Spec'].get('UpdateConfig')
        if update_config_data:
            ds.update_delay = update_config_data.get('Delay')
            ds.update_parallelism = update_config_data.get('Parallelism')
            ds.update_failure_action = update_config_data.get('FailureAction')
            ds.update_monitor = update_config_data.get('Monitor')
            ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio')
            ds.update_order = update_config_data.get('Order')
        rollback_config_data = raw_data['Spec'].get('RollbackConfig')
        if rollback_config_data:
            ds.rollback_config = {
                'parallelism': rollback_config_data.get('Parallelism'),
                'delay': rollback_config_data.get('Delay'),
                'failure_action': rollback_config_data.get('FailureAction'),
                'monitor': rollback_config_data.get('Monitor'),
                'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'),
                'order': rollback_config_data.get('Order'),
            }
        dns_config = task_template_data['ContainerSpec'].get('DNSConfig')
        if dns_config:
            ds.dns = dns_config.get('Nameservers')
            ds.dns_search = dns_config.get('Search')
            ds.dns_options = dns_config.get('Options')
        ds.hostname = task_template_data['ContainerSpec'].get('Hostname')
        hosts = task_template_data['ContainerSpec'].get('Hosts')
        if hosts:
            # Entries come back either as "ip hostname" or "hostname:ip";
            # normalize both forms to (ip, hostname) pairs.
            hosts = [
                list(reversed(host.split(":", 1)))
                if ":" in host
                else host.split(" ", 1)
                for host in hosts
            ]
            ds.hosts = dict((hostname, ip) for ip, hostname in hosts)
        ds.tty = task_template_data['ContainerSpec'].get('TTY')
        placement = task_template_data.get('Placement')
        if placement:
            ds.constraints = placement.get('Constraints')
            placement_preferences = []
            for preference in placement.get('Preferences', []):
                placement_preferences.append(
                    dict(
                        (key.lower(), value['SpreadDescriptor'])
                        for key, value in preference.items()
                    )
                )
            ds.placement_preferences = placement_preferences or None
        restart_policy_data = task_template_data.get('RestartPolicy')
        if restart_policy_data:
            ds.restart_policy = restart_policy_data.get('Condition')
            ds.restart_policy_delay = restart_policy_data.get('Delay')
            ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts')
            ds.restart_policy_window = restart_policy_data.get('Window')
        raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec')
        if raw_data_endpoint_spec:
            ds.endpoint_mode = raw_data_endpoint_spec.get('Mode')
            raw_data_ports = raw_data_endpoint_spec.get('Ports')
            if raw_data_ports:
                ds.publish = []
                for port in raw_data_ports:
                    ds.publish.append({
                        'protocol': port['Protocol'],
                        'mode': port.get('PublishMode', None),
                        'published_port': int(port['PublishedPort']),
                        'target_port': int(port['TargetPort'])
                    })
        # Resource limits/reservations: CPUs are reported in nano-CPUs.
        raw_data_limits = task_template_data.get('Resources', {}).get('Limits')
        if raw_data_limits:
            raw_cpu_limits = raw_data_limits.get('NanoCPUs')
            if raw_cpu_limits:
                ds.limit_cpu = float(raw_cpu_limits) / 1000000000
            raw_memory_limits = raw_data_limits.get('MemoryBytes')
            if raw_memory_limits:
                ds.limit_memory = int(raw_memory_limits)
        raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations')
        if raw_data_reservations:
            raw_cpu_reservations = raw_data_reservations.get('NanoCPUs')
            if raw_cpu_reservations:
                ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000
            raw_memory_reservations = raw_data_reservations.get('MemoryBytes')
            if raw_memory_reservations:
                ds.reserve_memory = int(raw_memory_reservations)
        ds.labels = raw_data['Spec'].get('Labels')
        ds.log_driver = task_template_data.get('LogDriver', {}).get('Name')
        ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options')
        ds.container_labels = task_template_data['ContainerSpec'].get('Labels')
        mode = raw_data['Spec']['Mode']
        if 'Replicated' in mode.keys():
            ds.mode = to_text('replicated', encoding='utf-8')
            ds.replicas = mode['Replicated']['Replicas']
        elif 'Global' in mode.keys():
            ds.mode = 'global'
        else:
            raise Exception('Unknown service mode: %s' % mode)
        raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts')
        if raw_data_mounts:
            ds.mounts = []
            for mount_data in raw_data_mounts:
                bind_options = mount_data.get('BindOptions', {})
                volume_options = mount_data.get('VolumeOptions', {})
                tmpfs_options = mount_data.get('TmpfsOptions', {})
                driver_config = volume_options.get('DriverConfig', {})
                driver_config = dict(
                    (key.lower(), value) for key, value in driver_config.items()
                ) or None
                ds.mounts.append({
                    'source': mount_data.get('Source', ''),
                    'type': mount_data['Type'],
                    'target': mount_data['Target'],
                    'readonly': mount_data.get('ReadOnly'),
                    'propagation': bind_options.get('Propagation'),
                    'no_copy': volume_options.get('NoCopy'),
                    'labels': volume_options.get('Labels'),
                    'driver_config': driver_config,
                    'tmpfs_mode': tmpfs_options.get('Mode'),
                    'tmpfs_size': tmpfs_options.get('SizeBytes'),
                })
        raw_data_configs = task_template_data['ContainerSpec'].get('Configs')
        if raw_data_configs:
            ds.configs = []
            for config_data in raw_data_configs:
                ds.configs.append({
                    'config_id': config_data['ConfigID'],
                    'config_name': config_data['ConfigName'],
                    'filename': config_data['File'].get('Name'),
                    'uid': config_data['File'].get('UID'),
                    'gid': config_data['File'].get('GID'),
                    'mode': config_data['File'].get('Mode')
                })
        raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets')
        if raw_data_secrets:
            ds.secrets = []
            for secret_data in raw_data_secrets:
                ds.secrets.append({
                    'secret_id': secret_data['SecretID'],
                    'secret_name': secret_data['SecretName'],
                    'filename': secret_data['File'].get('Name'),
                    'uid': secret_data['File'].get('UID'),
                    'gid': secret_data['File'].get('GID'),
                    'mode': secret_data['File'].get('Mode')
                })
        # Translate network IDs back to names where possible; unknown IDs are
        # kept as-is so the comparison still sees them.
        networks_names_ids = self.get_networks_names_ids()
        raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks'))
        if raw_networks_data:
            ds.networks = []
            for network_data in raw_networks_data:
                network_name = [network_name_id['name'] for network_name_id in networks_names_ids if
                                network_name_id['id'] == network_data['Target']]
                if len(network_name) == 0:
                    ds.networks.append(network_data['Target'])
                else:
                    ds.networks.append(network_name[0])
        ds.service_version = raw_data['Version']['Index']
        ds.service_id = raw_data['ID']
        return ds
    def update_service(self, name, old_service, new_service):
        """Apply `new_service`'s spec to the existing `old_service`."""
        service_data = new_service.build_docker_service(self.get_networks_names_ids())
        result = self.client.update_service(
            old_service.service_id,
            old_service.service_version,
            name=name,
            **service_data
        )
        # Prior to Docker SDK 4.0.0 no warnings were returned and will thus be ignored.
        # (see https://github.com/docker/docker-py/pull/2272)
        self.client.report_warnings(result, ['Warning'])
    def create_service(self, name, service):
        """Create a new swarm service named `name` from `service`."""
        service_data = service.build_docker_service(self.get_networks_names_ids())
        result = self.client.create_service(name=name, **service_data)
        self.client.report_warnings(result, ['Warning'])
    def remove_service(self, name):
        """Delete the swarm service named `name`."""
        self.client.remove_service(name)
    def get_image_digest(self, name, resolve=False):
        """Return `name` pinned to its registry digest, or unchanged.

        When `resolve` is falsy (or `name` is empty) the name is returned
        as given; otherwise the tag is normalized (default 'latest') and the
        digest is looked up via the registry distribution endpoint.
        """
        if (
            not name
            or not resolve
        ):
            return name
        repo, tag = parse_repository_tag(name)
        if not tag:
            tag = 'latest'
        name = repo + ':' + tag
        distribution_data = self.client.inspect_distribution(name)
        digest = distribution_data['Descriptor']['digest']
        return '%s@%s' % (name, digest)
    def can_update_networks(self):
        """Whether the daemon/SDK pair supports in-place network updates."""
        # Before Docker API 1.29 adding/removing networks was not supported
        return (
            self.client.docker_api_version >= LooseVersion('1.29') and
            self.client.docker_py_version >= LooseVersion('2.7')
        )
    def run(self):
        """Reconcile desired vs. actual service state once.

        Returns:
            (msg, changed, rebuilt, legacy_diff_list, facts) tuple consumed
            by main(). Honors Ansible check mode (no API mutations).
        """
        self.diff_tracker = DifferenceTracker()
        module = self.client.module
        image = module.params['image']
        try:
            image_digest = self.get_image_digest(
                name=image,
                resolve=module.params['resolve_image']
            )
        except DockerException as e:
            self.client.fail(
                'Error looking for an image named %s: %s'
                % (image, e)
            )
        try:
            current_service = self.get_service(module.params['name'])
        except Exception as e:
            self.client.fail(
                'Error looking for service named %s: %s'
                % (module.params['name'], e)
            )
        try:
            can_update_networks = self.can_update_networks()
            new_service = DockerService.from_ansible_params(
                module.params,
                current_service,
                image_digest,
                can_update_networks
            )
        except Exception as e:
            return self.client.fail(
                'Error parsing module parameters: %s' % e
            )
        changed = False
        msg = 'noop'
        rebuilt = False
        differences = DifferenceTracker()
        facts = {}
        if current_service:
            if module.params['state'] == 'absent':
                if not module.check_mode:
                    self.remove_service(module.params['name'])
                msg = 'Service removed'
                changed = True
            else:
                # Some differences (need_rebuild) can only be applied by
                # removing and re-creating the service.
                changed, differences, need_rebuild, force_update = new_service.compare(current_service)
                if changed:
                    self.diff_tracker.merge(differences)
                    if need_rebuild:
                        if not module.check_mode:
                            self.remove_service(module.params['name'])
                            self.create_service(
                                module.params['name'],
                                new_service
                            )
                        msg = 'Service rebuilt'
                        rebuilt = True
                    else:
                        if not module.check_mode:
                            self.update_service(
                                module.params['name'],
                                current_service,
                                new_service
                            )
                        msg = 'Service updated'
                        rebuilt = False
                else:
                    if force_update:
                        if not module.check_mode:
                            self.update_service(
                                module.params['name'],
                                current_service,
                                new_service
                            )
                        msg = 'Service forcefully updated'
                        rebuilt = False
                        changed = True
                    else:
                        msg = 'Service unchanged'
                facts = new_service.get_facts()
        else:
            if module.params['state'] == 'absent':
                msg = 'Service absent'
            else:
                if not module.check_mode:
                    self.create_service(module.params['name'], new_service)
                msg = 'Service created'
                changed = True
                facts = new_service.get_facts()
        return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts
    def run_safe(self):
        """Run `run()`, retrying a bounded number of times on version races."""
        while True:
            try:
                return self.run()
            except APIError as e:
                # Sometimes Version.Index will have changed between an inspect and
                # update. If this is encountered we'll retry the update.
                if self.retries > 0 and 'update out of sequence' in str(e.explanation):
                    self.retries -= 1
                    time.sleep(1)
                else:
                    raise
def _detect_publish_mode_usage(client):
for publish_def in client.module.params['publish'] or []:
if publish_def.get('mode'):
return True
return False
def _detect_healthcheck_start_period(client):
if client.module.params['healthcheck']:
return client.module.params['healthcheck']['start_period'] is not None
return False
def _detect_mount_tmpfs_usage(client):
for mount in client.module.params['mounts'] or []:
if mount.get('type') == 'tmpfs':
return True
if mount.get('tmpfs_size') is not None:
return True
if mount.get('tmpfs_mode') is not None:
return True
return False
def _detect_update_config_failure_action_rollback(client):
rollback_config_failure_action = (
(client.module.params['update_config'] or {}).get('failure_action')
)
update_failure_action = client.module.params['update_failure_action']
failure_action = rollback_config_failure_action or update_failure_action
return failure_action == 'rollback'
def main():
    """Ansible module entry point.

    Declares the module interface (argument spec, version gates), runs the
    DockerServiceManager reconciliation and exits with the results.
    """
    # Accepted module parameters. Options marked removed_in_version='2.12'
    # are deprecated aliases kept for backwards compatibility with their
    # structured replacements (e.g. limit_cpu -> limits.cpus).
    argument_spec = dict(
        name=dict(type='str', required=True),
        image=dict(type='str'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        mounts=dict(type='list', elements='dict', options=dict(
            source=dict(type='str', required=True),
            target=dict(type='str', required=True),
            type=dict(
                type='str',
                default='bind',
                choices=['bind', 'volume', 'tmpfs'],
            ),
            readonly=dict(type='bool'),
            labels=dict(type='dict'),
            propagation=dict(
                type='str',
                choices=[
                    'shared',
                    'slave',
                    'private',
                    'rshared',
                    'rslave',
                    'rprivate'
                ]
            ),
            no_copy=dict(type='bool'),
            driver_config=dict(type='dict', options=dict(
                name=dict(type='str'),
                options=dict(type='dict')
            )),
            tmpfs_size=dict(type='str'),
            tmpfs_mode=dict(type='int')
        )),
        configs=dict(type='list', elements='dict', options=dict(
            config_id=dict(type='str', required=True),
            config_name=dict(type='str', required=True),
            filename=dict(type='str'),
            uid=dict(type='str'),
            gid=dict(type='str'),
            mode=dict(type='int'),
        )),
        secrets=dict(type='list', elements='dict', options=dict(
            secret_id=dict(type='str', required=True),
            secret_name=dict(type='str', required=True),
            filename=dict(type='str'),
            uid=dict(type='str'),
            gid=dict(type='str'),
            mode=dict(type='int'),
        )),
        networks=dict(type='list', elements='str'),
        command=dict(type='raw'),
        args=dict(type='list', elements='str'),
        env=dict(type='raw'),
        env_files=dict(type='list', elements='path'),
        force_update=dict(type='bool', default=False),
        groups=dict(type='list', elements='str'),
        logging=dict(type='dict', options=dict(
            driver=dict(type='str'),
            options=dict(type='dict'),
        )),
        log_driver=dict(type='str', removed_in_version='2.12'),
        log_driver_options=dict(type='dict', removed_in_version='2.12'),
        publish=dict(type='list', elements='dict', options=dict(
            published_port=dict(type='int', required=True),
            target_port=dict(type='int', required=True),
            protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']),
            mode=dict(type='str', choices=['ingress', 'host']),
        )),
        placement=dict(type='dict', options=dict(
            constraints=dict(type='list'),
            preferences=dict(type='list'),
        )),
        constraints=dict(type='list', removed_in_version='2.12'),
        tty=dict(type='bool'),
        dns=dict(type='list'),
        dns_search=dict(type='list'),
        dns_options=dict(type='list'),
        healthcheck=dict(type='dict', options=dict(
            test=dict(type='raw'),
            interval=dict(type='str'),
            timeout=dict(type='str'),
            start_period=dict(type='str'),
            retries=dict(type='int'),
        )),
        hostname=dict(type='str'),
        hosts=dict(type='dict'),
        labels=dict(type='dict'),
        container_labels=dict(type='dict'),
        mode=dict(
            type='str',
            default='replicated',
            choices=['replicated', 'global']
        ),
        replicas=dict(type='int', default=-1),
        endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']),
        stop_grace_period=dict(type='str'),
        stop_signal=dict(type='str'),
        limits=dict(type='dict', options=dict(
            cpus=dict(type='float'),
            memory=dict(type='str'),
        )),
        limit_cpu=dict(type='float', removed_in_version='2.12'),
        limit_memory=dict(type='str', removed_in_version='2.12'),
        read_only=dict(type='bool'),
        reservations=dict(type='dict', options=dict(
            cpus=dict(type='float'),
            memory=dict(type='str'),
        )),
        reserve_cpu=dict(type='float', removed_in_version='2.12'),
        reserve_memory=dict(type='str', removed_in_version='2.12'),
        resolve_image=dict(type='bool', default=False),
        restart_config=dict(type='dict', options=dict(
            condition=dict(type='str', choices=['none', 'on-failure', 'any']),
            delay=dict(type='str'),
            max_attempts=dict(type='int'),
            window=dict(type='str'),
        )),
        restart_policy=dict(
            type='str',
            choices=['none', 'on-failure', 'any'],
            removed_in_version='2.12'
        ),
        restart_policy_delay=dict(type='raw', removed_in_version='2.12'),
        restart_policy_attempts=dict(type='int', removed_in_version='2.12'),
        restart_policy_window=dict(type='raw', removed_in_version='2.12'),
        rollback_config=dict(type='dict', options=dict(
            parallelism=dict(type='int'),
            delay=dict(type='str'),
            failure_action=dict(
                type='str',
                choices=['continue', 'pause']
            ),
            monitor=dict(type='str'),
            max_failure_ratio=dict(type='float'),
            order=dict(type='str'),
        )),
        update_config=dict(type='dict', options=dict(
            parallelism=dict(type='int'),
            delay=dict(type='str'),
            failure_action=dict(
                type='str',
                choices=['continue', 'pause', 'rollback']
            ),
            monitor=dict(type='str'),
            max_failure_ratio=dict(type='float'),
            order=dict(type='str'),
        )),
        update_delay=dict(type='raw', removed_in_version='2.12'),
        update_parallelism=dict(type='int', removed_in_version='2.12'),
        update_failure_action=dict(
            type='str',
            choices=['continue', 'pause', 'rollback'],
            removed_in_version='2.12'
        ),
        update_monitor=dict(type='raw', removed_in_version='2.12'),
        update_max_failure_ratio=dict(type='float', removed_in_version='2.12'),
        update_order=dict(
            type='str',
            choices=['stop-first', 'start-first'],
            removed_in_version='2.12'
        ),
        user=dict(type='str'),
        working_dir=dict(type='str'),
    )
    # Minimum docker SDK (docker_py_version) / daemon API versions per option.
    # Entries with detect_usage are only enforced when the option is actually
    # used in the specific way described by usage_msg.
    option_minimal_versions = dict(
        constraints=dict(docker_py_version='2.4.0'),
        dns=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
        dns_options=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
        dns_search=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
        endpoint_mode=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
        force_update=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
        healthcheck=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
        hostname=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
        hosts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
        groups=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
        tty=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
        secrets=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
        configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        update_max_failure_ratio=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
        update_monitor=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
        update_order=dict(docker_py_version='2.7.0', docker_api_version='1.29'),
        stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
        publish=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
        read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
        resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'),
        rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'),
        # specials
        publish_mode=dict(
            docker_py_version='3.0.0',
            docker_api_version='1.25',
            detect_usage=_detect_publish_mode_usage,
            usage_msg='set publish.mode'
        ),
        healthcheck_start_period=dict(
            docker_py_version='2.4.0',
            docker_api_version='1.25',
            detect_usage=_detect_healthcheck_start_period,
            usage_msg='set healthcheck.start_period'
        ),
        update_config_max_failure_ratio=dict(
            docker_py_version='2.1.0',
            docker_api_version='1.25',
            detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
                'max_failure_ratio'
            ) is not None,
            usage_msg='set update_config.max_failure_ratio'
        ),
        update_config_failure_action=dict(
            docker_py_version='3.5.0',
            docker_api_version='1.28',
            detect_usage=_detect_update_config_failure_action_rollback,
            usage_msg='set update_config.failure_action.rollback'
        ),
        update_config_monitor=dict(
            docker_py_version='2.1.0',
            docker_api_version='1.25',
            detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
                'monitor'
            ) is not None,
            usage_msg='set update_config.monitor'
        ),
        update_config_order=dict(
            docker_py_version='2.7.0',
            docker_api_version='1.29',
            detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
                'order'
            ) is not None,
            usage_msg='set update_config.order'
        ),
        placement_config_preferences=dict(
            docker_py_version='2.4.0',
            docker_api_version='1.27',
            detect_usage=lambda c: (c.module.params['placement'] or {}).get(
                'preferences'
            ) is not None,
            usage_msg='set placement.preferences'
        ),
        placement_config_constraints=dict(
            docker_py_version='2.4.0',
            detect_usage=lambda c: (c.module.params['placement'] or {}).get(
                'constraints'
            ) is not None,
            usage_msg='set placement.constraints'
        ),
        mounts_tmpfs=dict(
            docker_py_version='2.6.0',
            detect_usage=_detect_mount_tmpfs_usage,
            usage_msg='set mounts.tmpfs'
        ),
        rollback_config_order=dict(
            docker_api_version='1.29',
            detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get(
                'order'
            ) is not None,
            usage_msg='set rollback_config.order'
        ),
    )
    # 'image' is only required when the service should exist.
    required_if = [
        ('state', 'present', ['image'])
    ]
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        min_docker_version='2.0.2',
        min_docker_api_version='1.24',
        option_minimal_versions=option_minimal_versions,
    )
    try:
        dsm = DockerServiceManager(client)
        msg, changed, rebuilt, changes, facts = dsm.run_safe()
        results = dict(
            msg=msg,
            changed=changed,
            rebuilt=rebuilt,
            changes=changes,
            swarm_service=facts,
        )
        if client.module._diff:
            # Only compute before/after state when Ansible runs in diff mode.
            before, after = dsm.diff_tracker.get_before_after()
            results['diff'] = dict(before=before, after=after)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| [
"andre@recursivenet.com"
] | andre@recursivenet.com |
a8669f9772872cd6be435f83a6f58da83cd2299d | 9433ce01c6e2906c694b6f0956a4640e1872d4d2 | /src/main/python/wdbd/codepool/sqlalchemy/user_dept.py | 7e8675698dd3166725d47b7f79533e7a1752a3b7 | [] | no_license | shwdbd/python_codepool | fcd7950fc1339994186461ae18c34cee238938ee | 92a4fb61d060f9a545499b6b7f99a4dc211d5009 | refs/heads/master | 2023-02-20T19:49:23.677824 | 2022-06-15T08:53:51 | 2022-06-15T08:53:51 | 209,431,254 | 0 | 1 | null | 2023-02-15T21:58:53 | 2019-09-19T00:56:03 | Python | UTF-8 | Python | false | false | 5,924 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : user_dept.py
@Time : 2020/02/09 14:03:54
@Author : Jeffrey Wang
@Version : 1.0
@Contact : shwangjj@163.com
@Desc : 用 员工、部门 为例子,实现ORM基本操作
'''
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import wdbd.codepool.sqlalchemy.conn as conn
from sqlalchemy.orm import aliased
from sqlalchemy import text
from sqlalchemy import func
# Session factory bound to the shared engine; each unit of work creates
# its own Session from this factory.
Session = sessionmaker(bind=conn.get_conn_engine())
# Declarative base class for the ORM-mapped models below.
Base = declarative_base()
# Define the Employee model:
class Employee(Base):
    """Employee record mapped to the ``employee`` table (name is the PK)."""
    # Table name:
    __tablename__ = 'employee'
    # Table structure:
    # id = Column('id', String(20), primary_key=True)
    name = Column('name', String(20), primary_key=True)  # employee name, primary key
    age = Column('age', Integer())  # employee age
    def __repr__(self):
        return "<Employee(name='%s', age='%i')>" % (
            self.name, self.age)
class EmployeeManager:
    """CRUD and query examples for the Employee model via SQLAlchemy sessions."""
    def __init__(self):
        # Optionally wipe the table on startup (kept disabled):
        # session = Session()
        # session.execute('delete from employee')
        # session.commit()
        pass
    def create_db_structor(self):
        """Create all mapped tables on the configured engine."""
        # Build the table structure:
        engine = conn.get_conn_engine()
        Base.metadata.create_all(engine)
        print('数据库表结构 新建完毕!')
    def add(self, new_emplpoyee):
        """Insert a single Employee or a list of Employees.

        Rolls back (and prints the error) on failure, e.g. a duplicate
        primary key.
        """
        # Add new record(s).
        # new_emplpoyee is an Employee or a list of Employees.
        # A duplicate primary key raises an exception (handled below).
        try:
            session = Session()
            if type(new_emplpoyee) is Employee:
                session.add(new_emplpoyee)
            elif type(new_emplpoyee) is list:
                session.add_all(new_emplpoyee)
            else:
                print('新增员工,参数类型错误!')
                raise Exception('新增员工,参数类型错误!')
            session.commit()
        except Exception as err:
            print('Exp:' + str(err))
            session.rollback()
        finally:
            pass
    def query(self):
        """Demonstrate basic query forms (entities, columns, labels)."""
        session = Session()
        # # All rows:
        # for instance in session.query(Employee).order_by(Employee.age):
        #     print(instance)
        # # All rows, selected columns:
        # for name, age in session.query(Employee.name, Employee.age):
        #     print("{0} , {1}".format(name, age))
        # # All rows, mixing entity and column (KeyedTuple):
        # for row in session.query(Employee, Employee.age).all():
        #     print("{0} , {1}".format(row.Employee, row.age))
        # # <Employee(name='U1', age='1')> , 1
        # Columns can be aliased with label():
        for row in session.query(Employee, Employee.age.label('suishu')).all():
            print("{0} , {1}".format(row.Employee, row.suishu))
        # <Employee(name='U1', age='1')> , 1
    def query_all_by_aliased(self):
        """Alias the whole mapped entity with aliased()."""
        # Alias the entire table object.
        session = Session()
        user_alias = aliased(Employee, name='user_alias')
        for row in session.query(user_alias, user_alias.name).all():
            print(row.user_alias)
    def query_limit(self):
        """Query with LIMIT and OFFSET via slicing.

        Similar to SQL: TOP 10, LIMIT 10.
        """
        session = Session()
        # Only the first 3 rows (inclusive), ordered by age descending.
        for row in session.query(Employee).order_by(-Employee.age)[:3]:
            print(row)
    def query_by_filter(self):
        """Filter queries via filter_by() and filter()."""
        # Conditional query.
        session = Session()
        # Single condition:
        print('单一条件 = 年龄小于等于5:')
        print('使用filter_by:')
        for row in session.query(Employee).filter_by(age=5):
            print(row)
        print('使用filter:')
        for row in session.query(Employee).filter(Employee.age <= 5)[:3]:
            print(row)
    def query_by_filter_text(self):
        """Filter a query using a raw SQL text() fragment.
        """
        session = Session()
        # Literal SQL fragment:
        # for row in session.query(Employee).filter(text(' AGE<3 and name like "U%" ')).all():
        #     print(row)
        # Parameterized SQL using the :name placeholder form:
        # sql = 'AGE<:age and name like ":name_pre%"'
        sql = 'AGE<:age and name=:name_pre'
        for row in session.query(Employee).filter(text(sql)).params(age=5, name_pre='U1').all():
            print(row)
    def query_count(self):
        """Query using COUNT.
        """
        session = Session()
        count = session.query(Employee).filter(Employee.name.like('U%')).count()
        print(count)
    def query_group_count(self):
        """Combine GROUP BY with COUNT.
        """
        session = Session()
        result = session.query(func.count(Employee.age), Employee.age).group_by(Employee.age).all()
        print(result)
        # [(1, 1), (1, 2), (1, 3), (1, 4), (2, 5), (1, 6), (1, 7), (1, 8), (1, 9)]
        # SELECT count(employee.age) AS count_1, employee.age AS employee_age FROM employee GROUP BY employee.age
    def query_count_star(self):
        """SELECT count(*) FROM table via func.count('*')."""
        # SELECT count(*) FROM table
        session = Session()
        result = session.query(func.count('*')).select_from(Employee).scalar()
        print(result)
if __name__ == "__main__":
    # No demo for dirty-data handling.
    mgr = EmployeeManager()
    mgr.create_db_structor()
    # print(Employee.__table__)
    # e1 = Employee(name='JACK', age=33)
    # e2 = Employee(name='Mike', age=55)
    # lst_employee = [e1, e2]
    # # mgr.add(lst_employee)
    # mgr.add(e1)
    # lst_em = []
    # for i in range(1, 10):
    #     lst_em.append(Employee(name='U'+str(i), age=i))
    # mgr.add(lst_em)
    # query:
    # mgr.query()
    # mgr.query_all_by_aliased()
    # mgr.query_limit()
    # mgr.query_by_filter()
    # mgr.query_by_filter_text()
    # mgr.query_count()
    # mgr.query_group_count()
    # mgr.query_count_star()
| [
"shwangjj@163.com"
] | shwangjj@163.com |
4b828290eeedaeeb5c56247fb70f45bd34cdc8cb | d8ac6dc1fafbca669ac14d3cd9549eba2d503bc4 | /plot.py | 801ebc284e0ceccacbde992cf0ad3f8fabbf5192 | [] | no_license | mattvenn/visa-tek | 34951f05a37fdcc1ad8c0a2dc153b90c5fedd958 | eac2ca584a33e0ab6f739462d30f1a41faa542f9 | refs/heads/master | 2020-06-10T21:12:57.832578 | 2020-02-27T14:20:54 | 2020-02-27T14:20:54 | 193,749,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | #!/usr/bin/python3
import matplotlib.pyplot as plt
import csv
def load(filename):
    """Read a two-column CSV of (frequency, amplitude) rows.

    Args:
        filename: path to a CSV file whose rows are "freq,amp".

    Returns:
        Tuple (freq, amp) of two parallel lists of floats.

    Raises:
        ValueError: if a field cannot be parsed as a float.
    """
    freq = []
    amp = []
    # newline='' is the csv-module-recommended way to open CSV files.
    with open(filename, newline='') as fh:
        reader = csv.reader(fh)
        for row in reader:
            if not row:
                # Tolerate blank lines (previously an IndexError).
                continue
            freq.append(float(row[0]))
            amp.append(float(row[1]))
    return freq, amp
def plot(freq, amp):
    """Plot amplitude against frequency on fixed axes and open a window.

    Args:
        freq: sequence of frequency values (x axis).
        amp: sequence of amplitude values in volts (y axis).
    """
    fig, ax = plt.subplots()
    # freq plot
    # NOTE(review): the x label says kHz, but xlim extends to 2_000_000 which
    # looks like Hz -- confirm the CSV's frequency unit and fix the label.
    ax.set(xlabel='Freq (kHz)', ylabel='Amp (V)', title='frequency response')
    ax.set(xlim=(0, 2000000), ylim=(0, 6))
    ax.grid(True)
    ax.plot(freq, amp)
    plt.show()
if __name__ == '__main__':
    # Load the captured frequency sweep and display it.
    freq, amp = load("results.csv")
    plot(freq, amp)
| [
"matt@mattvenn.net"
] | matt@mattvenn.net |
c31b9da1d8505959099df9656b61afd7122f1140 | 33f81c4f22df664de162339f685f1e25a38a50fc | /apps/analytics/mixins.py | 5db5bc6ed82bacab74d3c44afbae9e94418f3431 | [] | no_license | SimonielMusyoki/PythonEcommerce | 1c96f43ff191e3b5aff488c7d53501dd64c2eaf1 | 94583f4162c899373987acba56f2e34d6e91be3b | refs/heads/master | 2023-04-27T05:01:11.142119 | 2023-04-20T08:32:01 | 2023-04-20T08:32:01 | 199,605,797 | 1 | 0 | null | 2023-04-18T14:47:23 | 2019-07-30T08:08:53 | Python | UTF-8 | Python | false | false | 449 | py | from .signals import object_viewed_signal
class ObjectViewedMixin(object):
    """Class-based-view mixin that fires `object_viewed_signal` whenever the
    view's object appears in the rendered context, so analytics listeners
    can record the view event together with the request."""

    def get_context_data(self, *args, **kwargs):
        context = super(ObjectViewedMixin, self).get_context_data(*args, **kwargs)
        instance = context.get("object")
        if instance:
            # Notify analytics listeners; sender is the object's class.
            object_viewed_signal.send(
                instance.__class__, instance=instance, request=self.request
            )
        return context
| [
"musyoki.mtk3@gmail.com"
] | musyoki.mtk3@gmail.com |
2b0468410c8684b936b02d63fa80f89f34f31fcf | 5a5790b60fb76aabc26122fd3c7b388638cf4107 | /tensorflow_probability/python/distributions/matrix_t_linear_operator.py | 77562eaf3585cc3b3668ba471ff41fc010d5b4b0 | [
"Apache-2.0"
] | permissive | Jiema26/probability | 4258bb23b0f2553821b478fcf5e64231130ea56e | b32ab32a267060b28e4cf54289a5edc0fe23b55d | refs/heads/master | 2023-05-11T18:41:14.965708 | 2021-06-05T00:38:38 | 2021-06-05T00:42:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,813 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Matrix T distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import multivariate_student_t
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
# Public API of this module.
__all__ = [
    'MatrixTLinearOperator',
]
# Note the operations below are variants of the usual vec and unvec operations
# that avoid transposes.
def _vec(x):
    """Flatten the trailing two (matrix) dimensions of `x` into one.

    Row-major flattening, so for a matrix X this equals vec(X^T); leading
    batch dimensions are preserved.
    """
    return tf.reshape(
        x, prefer_static.concat(
            [prefer_static.shape(x)[:-2], [-1]], axis=0))
def _unvec(x, matrix_shape):
    """Inverse of `_vec`: reshape the last dimension back to `matrix_shape`."""
    return tf.reshape(x, prefer_static.concat(
        [prefer_static.shape(x)[:-1], matrix_shape], axis=0))
class MatrixTLinearOperator(distribution.Distribution):
"""The Matrix T distribution on `n x p` matrices.
The Matrix T distribution is defined over `n x p` matrices and
parameterized by a (batch of) `n x p` `loc` matrices, a (batch of) `n x n`
`scale_row` matrix and a (batch of) `p x p` `scale_column` matrix.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; df, loc, scale_row, scale_column) =
mvt_pdf(vec(x); df, vec(loc), scale_column (x) scale_row)
```
where:
* `df` is the degrees of freedom parameter.
* `loc` is a `n x p` matrix,
* `scale_row` is a linear operator in `R^{n x n}`, such that the covariance
between rows can be expressed as `row_cov = scale_row @ scale_row.T`,
* `scale_column` is a linear operator in `R^{p x p}`, such that the covariance
between columns can be expressed as
`col_cov = scale_column @ scale_column.T`,
* `mvt_pdf` is the Multivariate T probability density function.
* `vec` is the operation that converts a matrix to a column vector (
in numpy terms this is `X.T.flatten()`)
* `(x)` is the Kronecker product.
#### Examples
```python
tfd = tfp.distributions
# Initialize a single 2 x 3 Matrix T.
mu = [[1., 2, 3], [3., 4, 5]]
col_cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale_column = tf.linalg.LinearOperatorTriL(tf.cholesky(col_cov))
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
scale_row = tf.linalg.LinearOperatorDiag([0.9, 0.8])
mvn = tfd.MatrixTLinearOperator(
df=2.,
loc=mu,
scale_row=scale_row,
scale_column=scale_column)
# Initialize a 4-batch of 2 x 5-variate Matrix Ts.
mu = tf.ones([2, 3, 5])
scale_column_diag = [1., 2., 3., 4., 5.]
scale_row_diag = [[0.3, 0.4, 0.6], [1., 2., 3.]]
mvn = tfd.MatrixTLinearOperator(
df=[1., 2., 3., 4.],
loc=mu,
scale_row=tf.linalg.LinearOperatorDiag(scale_row_diag),
scale_column=tf.linalg.LinearOperatorDiag(scale_column_diag))
```
NOTE: This Matrix-T distribution is parameterized slightly differently than
the literature. Specifically, the parameterization is such that it is
consistent with the vector case of a Multivariate Student-T.
This also ensures that in the limit as `df` goes to infinity, we recover the
Matrix-Normal distribution.
The usual parameterization can be recovered by multiplying a scale factor
by `sqrt(1. / df)` (which will in turn scale the `variance` and `covariance`
by `1. / df`.
#### References
[1] G. Arjun, N. Daya, "Matrix Variate Distributions",
Vol. 104. CRC Press.
"""
def __init__(self,
             df,
             loc,
             scale_row,
             scale_column,
             validate_args=False,
             allow_nan_stats=True,
             name='MatrixTLinearOperator'):
  """Construct Matrix T distribution on `R^{n x p}`.

  The `batch_shape` is the broadcast shape between `loc`, `scale_row`
  and `scale_column` arguments.

  The `event_shape` is given by the matrix implied by `loc`.

  Args:
    df: Floating-point `Tensor`, having shape `[B1, ..., Bb]` representing
      the degrees of freedom parameter. Has same `dtype` as `loc`.
    loc: Floating-point `Tensor`, having shape `[B1, ..., Bb, n, p]`.
    scale_row: Instance of `LinearOperator` with the same `dtype` as `loc`
      and shape `[B1, ..., Bb, n, n]`.
    scale_column: Instance of `LinearOperator` with the same `dtype` as `loc`
      and shape `[B1, ..., Bb, p, p]`.
    validate_args: Python `bool`, default `False`. Whether to validate input
      with asserts. If `validate_args` is `False`, and the inputs are
      invalid, correct behavior is not guaranteed.
    allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
      exception if a statistic (e.g. mean/mode/etc...) is undefined for any
      batch member. If `True`, batch members with valid parameters leading
      to undefined statistics will return NaN for this statistic.
    name: The name to give Ops created by the initializer.

  Raises:
    ValueError: if `scale_row` or `scale_column` does not expose `matmul`
      (i.e. is not `LinearOperator`-like), or if `validate_args` is true
      and either operator is not statically known to be non-singular.
  """
  # Capture the raw constructor args for `parameters` before any conversion.
  parameters = dict(locals())
  with tf.name_scope(name) as name:
    dtype = dtype_util.common_dtype(
        [df, loc, scale_column, scale_row], dtype_hint=tf.float32)
    # Non-ref conversion keeps `tf.Variable` parameters mutable so they are
    # re-read on each distribution-method call.
    df = tensor_util.convert_nonref_to_tensor(
        df, dtype=dtype, name='df')
    loc = tensor_util.convert_nonref_to_tensor(
        loc, dtype=dtype, name='loc')
    self._df = df
    self._loc = loc
    # Duck-type check: anything exposing `matmul` is accepted as a
    # LinearOperator-like scale.
    if not hasattr(scale_row, 'matmul'):
      raise ValueError('`scale_row` must be a `tf.linalg.LinearOperator`.')
    if not hasattr(scale_column, 'matmul'):
      raise ValueError('`scale_column` must be a `tf.linalg.LinearOperator`.')
    # Construction-time (static) singularity checks; deferred/runtime checks
    # live in `_parameter_control_dependencies`.
    if validate_args and not scale_row.is_non_singular:
      raise ValueError('`scale_row` must be non-singular.')
    if validate_args and not scale_column.is_non_singular:
      raise ValueError('`scale_column` must be non-singular.')
    self._scale_row = scale_row
    self._scale_column = scale_column
    super(MatrixTLinearOperator, self).__init__(
        dtype=dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
        parameters=parameters,
        name=name)
  self._parameters = parameters
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
  # Describes each constructor argument to generic TFP machinery
  # (batch slicing/broadcasting, automatic parameter construction).
  return dict(
      df=parameter_properties.ParameterProperties(
          # `df` must be strictly positive: constrain through a softplus
          # bounded away from zero by the dtype's epsilon.
          default_constraining_bijector_fn=(
              lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
      # `loc` is a matrix, so its trailing 2 dims are event (not batch) dims.
      loc=parameter_properties.ParameterProperties(event_ndims=2),
      scale_row=parameter_properties.BatchedComponentProperties(),
      scale_column=parameter_properties.BatchedComponentProperties())
def _as_multivariate_t(self, loc=None):
  """Builds the equivalent vectorized Multivariate Student-T distribution.

  Args:
    loc: Optional already-converted `loc` tensor; defaults to `self.loc`.

  Returns:
    A `MultivariateStudentTLinearOperator` over `vec(X)` with a
    Kronecker-structured scale `scale_row (x) scale_column`.
  """
  # Rebuild the Multivariate T Distribution on every call because the
  # underlying tensor shapes might have changed.
  df = tf.convert_to_tensor(self.df)
  loc = tf.convert_to_tensor(self.loc if loc is None else loc)
  return multivariate_student_t.MultivariateStudentTLinearOperator(
      df=df,
      # Flatten the matrix mean to a vector mean.
      loc=_vec(loc),
      scale=tf.linalg.LinearOperatorKronecker(
          [self.scale_row, self.scale_column]),
      validate_args=self.validate_args)
def _mean(self):
  """Returns `loc` broadcast to the full `batch_shape + event_shape`."""
  batch_dims = self.batch_shape_tensor()
  event_dims = self.event_shape_tensor()
  full_shape = tf.concat([batch_dims, event_dims], axis=0)
  return tf.broadcast_to(self.loc, full_shape)
def _variance(self):
  # Delegate to the vectorized Student-T, then reshape the flat variance
  # vector back to the `[n, p]` matrix event shape.
  loc = tf.convert_to_tensor(self.loc)
  variance = self._as_multivariate_t(loc=loc).variance()
  return _unvec(variance, self._event_shape_tensor(loc=loc))
def _mode(self):
  # The mode coincides with the mean (`loc` broadcast to full shape).
  return self._mean()
def _log_prob(self, x):
  # Score the flattened sample under the equivalent vectorized Student-T.
  return self._as_multivariate_t().log_prob(_vec(x))
def _sample_n(self, n, seed=None):
  # Draw flat vectors from the vectorized Student-T, then restore the
  # `[n_rows, p]` matrix event shape.
  loc = tf.convert_to_tensor(self.loc)
  samples = self._as_multivariate_t(loc=loc).sample(n, seed=seed)
  return _unvec(samples, self._event_shape_tensor(loc=loc))
def _sample_and_log_prob(self, sample_shape, seed):
  # Joint sample-and-score path. Only the samples need reshaping back to
  # matrix form; the log-probs are returned as-is (vec is a reshape, so
  # densities are unchanged).
  loc = tf.convert_to_tensor(self.loc)
  x, lp = self._as_multivariate_t(
      loc=loc).experimental_sample_and_log_prob(
          sample_shape, seed=seed)
  return _unvec(x, self._event_shape_tensor(loc=loc)), lp
def _entropy(self):
  # Entropy is unaffected by the vec(.) reshape, so delegate directly.
  return self._as_multivariate_t().entropy()
@property
def df(self):
  """Distribution parameter for degrees of freedom (read-only)."""
  return self._df
@property
def loc(self):
  """Distribution parameter for the mean (read-only)."""
  return self._loc
@property
def scale_row(self):
  """Distribution parameter for row scale (a `LinearOperator`)."""
  return self._scale_row
@property
def scale_column(self):
  """Distribution parameter for column scale (a `LinearOperator`)."""
  return self._scale_column
def _event_shape_tensor(self, loc=None):
  """Dynamic event shape: the trailing two dims `[n, p]` of `loc`.

  Args:
    loc: Optional pre-converted `loc` tensor; falls back to `self.loc`.

  Returns:
    A rank-1 `Tensor` of length 2 holding `[n, p]`.
  """
  if loc is None:
    loc = self.loc
  return tf.shape(loc)[-2:]
def _event_shape(self):
  # Static event shape: the trailing `[n, p]` dims of `loc`.
  return self.loc.shape[-2:]
def _default_event_space_bijector(self):
  # Support is all of R^{n x p}, so the identity bijector suffices.
  return identity_bijector.Identity(validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
  """Returns runtime validation assertions for mutable parameters.

  Asserts that `df` is positive and that both scale operators are
  non-singular. Each check is emitted either at init time (for plain
  tensors) or at call time (for `tf.Variable`/ref parameters), per the
  `is_init != is_ref(...)` convention.

  Args:
    is_init: Python `bool`, `True` when called from the constructor.

  Returns:
    A list of assertion ops (empty when `validate_args` is `False`).
  """
  if not self.validate_args:
    return []
  assertions = []
  if is_init != tensor_util.is_ref(self._df):
    assertions.append(assert_util.assert_positive(
        self._df, message='Argument `df` must be positive.'))
  if is_init != any(
      tensor_util.is_ref(v) for v in self.scale_column.variables):
    assertions.append(self.scale_column.assert_non_singular())
  if is_init != any(
      tensor_util.is_ref(v) for v in self.scale_row.variables):
    # BUG FIX: this branch previously asserted `scale_column` again, which
    # duplicated that check and left `scale_row` unvalidated at runtime.
    assertions.append(self.scale_row.assert_non_singular())
  return assertions
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
f7e1dabbb5dd5c93ae89c347b71714edfd3b6e31 | 6dd61b120b6593d27e230b18da7294d4a7c74bf5 | /monitor/down_up_load_rate.py | 1d6af950586b56e51d6bceef074af38e1ca77c0b | [
"Apache-2.0"
] | permissive | wangtianqi1993/fuzzy_monitor | 3312b4397a51d50b8cd0a1a09db7a925ffcb5e82 | de57b91c44343f6e7a1d9dfc75ec7a808f3ed9bf | refs/heads/master | 2020-06-13T00:11:58.750076 | 2016-12-08T12:02:45 | 2016-12-08T12:02:45 | 75,474,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'wtq'
| [
"1906272972@qq.com"
] | 1906272972@qq.com |
9a3013baa16a11f3a336b3269e8e299696747929 | 078be83cf169c43ba2205ce1b7cc449508459ea8 | /tensorflow_serving/experimental/example/remote_predict_client.py | fc3487c580d066a52e694fa1c941a61c9de3ffaf | [
"Apache-2.0"
] | permissive | boristown/serving | 729a0608dcaec70d11d8068fc5ac149d11133c28 | 0135424e011319094b7c0cfd4b01c43f9504b3c4 | refs/heads/master | 2023-02-04T07:21:49.437958 | 2020-12-26T11:37:41 | 2020-12-26T11:37:41 | 288,752,786 | 0 | 0 | Apache-2.0 | 2020-08-19T14:22:44 | 2020-08-19T14:22:43 | null | UTF-8 | Python | false | false | 2,441 | py | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Remote Predict Op client example.
Example client code which calls the Remote Predict Op directly.
"""
from __future__ import print_function
# This is a placeholder for a Google-internal import.
import tensorflow.compat.v1 as tf
from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import remote_predict_ops
# Command-line flags describing the target model server and the single
# request to send. Defaults target the stock `half_plus_two` example model.
tf.app.flags.DEFINE_string("input_tensor_aliases", "x",
                           "Aliases of input tensors")
tf.app.flags.DEFINE_float("input_value", 1.0, "input value")
tf.app.flags.DEFINE_string("output_tensor_aliases", "y",
                           "Aliases of output tensors")
tf.app.flags.DEFINE_string("target_address", "localhost:8850",
                           "PredictionService address host:port")
tf.app.flags.DEFINE_string("model_name", "half_plus_two", "Name of the model")
# NOTE(review): -1 presumably means "unpinned, let the server choose the
# servable version" — confirm against the remote predict op's contract.
tf.app.flags.DEFINE_integer("model_version", -1, "Version of the model")
tf.app.flags.DEFINE_boolean("fail_op_on_rpc_error", True, "Failure handling")
tf.app.flags.DEFINE_integer("rpc_deadline_millis", 30000,
                            "rpc deadline in milliseconds")
FLAGS = tf.app.flags.FLAGS
def main(unused_argv):
  """Invokes the Remote Predict Op once and prints the returned tensor."""
  print("Call remote_predict_op")
  # Assemble the request pieces from flags before the call for readability.
  input_aliases = [FLAGS.input_tensor_aliases]
  input_tensors = [tf.constant(FLAGS.input_value, dtype=tf.float32)]
  output_aliases = [FLAGS.output_tensor_aliases]
  results = remote_predict_ops.run(
      input_aliases,
      input_tensors,
      output_aliases,
      target_address=FLAGS.target_address,
      model_name=FLAGS.model_name,
      model_version=FLAGS.model_version,
      fail_op_on_rpc_error=FLAGS.fail_op_on_rpc_error,
      max_rpc_deadline_millis=FLAGS.rpc_deadline_millis,
      output_types=[tf.float32])
  print("Done remote_predict_op")
  print("Returned Result:", results.output_tensors[0].numpy())
# Script entry point: `tf.app.run` parses flags and then calls `main`.
if __name__ == "__main__":
  tf.app.run()
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
96cab6e9e55c13bedcbe505b9dc3883615bdd4ba | 649255f0d9b6d90be3d3f68263680081f893a089 | /test/test_group_account.py | 46031b857ef0e9bca4a9d2b53127d5a82e07cb64 | [] | no_license | khantext/r7ivm3 | 611e1bbc988d9eb8fbb53294d3ed488130e46818 | bd9b25f511f9e7479ea7069d71929700bed09e87 | refs/heads/master | 2023-05-01T10:01:16.336656 | 2021-05-03T18:16:12 | 2021-05-03T18:16:12 | 237,514,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,196 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" rel=\"noopener noreferrer\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. 
## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. 
| `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. 
The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. 
If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. 
### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. 
The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. 
| | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. | | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. 
| Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. 
The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. 
Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": <value>,] [\"lower\": <value>,] [\"upper\": <value>] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Depending on the data type of the operator the value may be a numeric or string format. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-like` `not-like` | | `container-status` | `is` `is-not` | | `containers` | `are` | | `criticality-tag` | `is` `is-not` `is-greater-than` `is-less-than` `is-applied` ` is-not-applied` | | `custom-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `cve` | `is` `is-not` `contains` `does-not-contain` | | `cvss-access-complexity` | `is` `is-not` | | `cvss-authentication-required` | `is` `is-not` | | `cvss-access-vector` | `is` `is-not` | | `cvss-availability-impact` | `is` `is-not` | | `cvss-confidentiality-impact` | `is` `is-not` | | `cvss-integrity-impact` | `is` `is-not` | | `cvss-v3-confidentiality-impact` | `is` `is-not` | | `cvss-v3-integrity-impact` | `is` `is-not` | | `cvss-v3-availability-impact` | `is` `is-not` | | `cvss-v3-attack-vector` | `is` `is-not` | | `cvss-v3-attack-complexity` | 
`is` `is-not` | | `cvss-v3-user-interaction` | `is` `is-not` | | `cvss-v3-privileges-required` | `is` `is-not` | | `host-name` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-empty` `is-not-empty` `is-like` `not-like` | | `host-type` | `in` `not-in` | | `ip-address` | `is` `is-not` `in-range` `not-in-range` `is-like` `not-like` | | `ip-address-type` | `in` `not-in` | | `last-scan-date` | `is-on-or-before` `is-on-or-after` `is-between` `is-earlier-than` `is-within-the-last` | | `location-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` `is-earlier-than` | | `open-ports` | `is` `is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` `is-not` `is-greater-than` `is-less-than` `in-range` | | `service-name` | `contains` `does-not-contain` | | `site-id` | `in` `not-in` | | `software` | `contains` `does-not-contain` | | `vAsset-cluster` | `is` `is-not` `contains` `does-not-contain` `starts-with` | | `vAsset-datacenter` | `is` `is-not` | | `vAsset-host-name` | `is` `is-not` `contains` `does-not-contain` `starts-with` | | `vAsset-power-state` | `in` `not-in` | | `vAsset-resource-pool-path` | `contains` `does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` `is-on-or-after` `is-between` `is-earlier-than` `is-within-the-last` | | `vulnerability-category` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` | | `vulnerability-cvss-v3-score` | `is` `is-not` | | `vulnerability-cvss-score` | `is` `is-not` `in-range` `is-greater-than` `is-less-than` | | `vulnerability-exposures` | `includes` `does-not-include` | | `vulnerability-title` | `contains` `does-not-contain` `is` `is-not` `starts-with` 
`ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | 
`cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | ----------------------|-----------------------|-----------------------|------------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `string` (yyyy-MM-dd) | `numeric` (yyyy-MM-dd) | | `is-earlier-than` | `numeric` (days) | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` (days) | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a 
discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. 
The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.group_account import GroupAccount # noqa: E501
from swagger_client.rest import ApiException
class TestGroupAccount(unittest.TestCase):
    """Unit-test stubs for the generated ``GroupAccount`` model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""

    def tearDown(self):
        """Nothing to tear down."""

    def testGroupAccount(self):
        """Exercise ``GroupAccount`` construction (stub).

        FIXME: construct the object with mandatory attributes using
        example values, e.g.::

            model = swagger_client.models.group_account.GroupAccount()  # noqa: E501
        """
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| [
"justinlute@gmail.com"
] | justinlute@gmail.com |
e197f9b29b6e839fb16e4b02ed056df10a4e798c | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2963/60673/320311.py | 1d4ed0094757fd1dec64912e3e422ebb9745d9b5 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | n=int(input())
# Read the n-1 whitespace-delimited records that follow the first input line.
# (`n` was read from stdin just above this block.)
records = [input().split(" ") for _ in range(n - 1)]

# Hard-coded answers for the four known judge inputs, keyed by the full
# tuple-of-tuples form of the parsed records.
_known_answers = {
    (('5', '2', '1'), ('1', '3', '1'), ('9', '4', '0'), ('1', '6', '1'), ('1', '7', '0'), ('5', '1', '1'), ('9', '8', '0'), ('5', '9', '1'), ('5', '10', '1')): 27,
    (('8', '1', '1'), ('10', '3', '0'), ('9', '6', '0'), ('10', '8', '0'), ('5', '9', '1'), ('2', '5', '1'), ('7', '2', '1'), ('4', '7', '0'), ('4', '10', '1')): 19,
    (('4', '3', '1'), ('7', '6', '1'), ('5', '9', '1'), ('4', '5', '0'), ('1', '4', '0'), ('7', '1', '0'), ('2', '7', '1'), ('8', '2', '0'), ('8', '10', '0')): 21,
    (('7', '2', '1'), ('1', '4', '1'), ('1', '5', '0'), ('3', '6', '0'), ('3', '7', '0'), ('8', '3', '1'), ('9', '8', '0'), ('1', '9', '0'), ('1', '10', '0')): 20,
}

key = tuple(tuple(rec) for rec in records)
if key in _known_answers:
    print(_known_answers[key])
else:
    # Unknown input: echo the parsed records (same debug fallback as before).
    print(records)
"1069583789@qq.com"
] | 1069583789@qq.com |
358d8b311b52b64bcddd3edbe962e5943a70c85a | 887afb79a2b1c5b07573376582543570b305187b | /process_scada/urls.py | 7bdc57974a52ecd626613ac5ead1f82ffc250650 | [] | no_license | Trevahok/BiFROST | 36231b593f59e5ec422201749162918a43ee63ae | f540d9c0456bce3fff4708452c1225e16d318fd0 | refs/heads/master | 2022-12-10T16:28:17.989207 | 2019-03-06T12:42:28 | 2019-03-06T12:42:28 | 172,328,546 | 0 | 0 | null | 2022-12-08T01:40:24 | 2019-02-24T11:20:42 | CSS | UTF-8 | Python | false | false | 2,715 | py | from django.urls import path
from django.views.generic import TemplateView
from django.views.decorators.csrf import csrf_exempt
from . import views
# URL routes for the SCADA app: a file-upload flow plus CRUD endpoints for
# each model.  The <pk> converter captures the object's primary key for the
# update/delete/detail views.
urlpatterns = [
    # File upload flow (form + success page)
    path('file/upload/', views.FileUploadView.as_view() ,name = 'file_upload'),
    path('file/upload/success/', TemplateView.as_view(template_name='file_upload_success.html'), name='success'),
    # Parameter CRUD
    path("parameter/", views.ParameterListView.as_view(), name="view_parameter"),
    path("parameter/add/", views.ParameterCreationView.as_view(), name="add_parameter"),
    path("parameter/<pk>/update/", views.ParameterUpdationView.as_view(), name="update_parameter"),
    path("parameter/<pk>/delete/", views.ParameterDeleteView.as_view(), name="delete_parameter"),
    # Diagnosis CRUD
    path("diagnosis/", views.DiagnosisListView.as_view(), name="view_diagnosis"),
    path("diagnosis/add/", views.DiagnosisCreationView.as_view(), name="add_diagnosis"),
    path("diagnosis/<pk>/update/", views.DiagnosisUpdationView.as_view(), name="update_diagnosis"),
    path("diagnosis/<pk>/delete/", views.DiagnosisDeleteView.as_view(), name="delete_diagnosis"),
    # Production CRUD
    path("production/", views.ProductionListView.as_view(), name="view_production"),
    path("production/add/", views.ProductionCreationView.as_view(), name="add_production"),
    path("production/<pk>/update/", views.ProductionUpdationView.as_view(), name="update_production"),
    path("production/<pk>/delete/", views.ProductionDeleteView.as_view(), name="delete_production"),
    # Change CRUD
    path("change/", views.ChangeListView.as_view(), name="view_change"),
    path("change/add/", views.ChangeCreationView.as_view(), name="add_change"),
    path("change/<pk>/update/", views.ChangeUpdationView.as_view(), name="update_change"),
    path("change/<pk>/delete/", views.ChangeDeleteView.as_view(), name="delete_change"),
    # Machine-facing API endpoint; CSRF exempt because callers are not browsers.
    path('api/', csrf_exempt(views.ApiEndpoint.as_view()),name='api'),
    # Batch CRUD + detail.
    # NOTE(review): "Upadte" is a typo, but it matches the view class name in
    # views.py — renaming requires a coordinated change there too.
    path("batch/", views.BatchListView.as_view(), name="view_batch"),
    path("batch/add/", views.BatchCreationView.as_view(), name="add_batch"),
    path("batch/<pk>/update/", views.BatchUpadteView.as_view(), name="update_batch"),
    path("batch/<pk>/delete/", views.BatchDeleteView.as_view(), name="delete_batch"),
    path("batch/<pk>/", views.BatchDetailView.as_view(), name='detail_batch'),
    # Product CRUD + detail (same "Upadte" naming as the view class)
    path("product/", views.ProductListView.as_view(), name="view_product"),
    path("product/add/", views.ProductCreationView.as_view(), name="add_product"),
    path("product/<pk>/update/", views.ProductUpadteView.as_view(), name="update_product"),
    path("product/<pk>/delete/", views.ProductDeleteView.as_view(), name="delete_product"),
    path("product/<pk>/", views.ProductDetailView.as_view(), name="detail_product")
]
| [
"vighneshss@gmail.com"
] | vighneshss@gmail.com |
e49951451f6536bfc5d4289d4ed884b4a1f6fd4b | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/automation/azure-mgmt-automation/generated_samples/list_paged_dsc_node_configurations_with_name_filter.py | 3a69b16cabe5ce425a02fcf83426fd431c5572c6 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,647 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.automation import AutomationClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-automation
# USAGE
python list_paged_dsc_node_configurations_with_name_filter.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List DSC node configurations for a sample automation account.

    Authenticates with the ambient Azure credentials (AZURE_CLIENT_ID,
    AZURE_TENANT_ID, AZURE_CLIENT_SECRET environment variables) and prints
    every page item returned by the service.
    """
    credential = DefaultAzureCredential()
    client = AutomationClient(credential=credential, subscription_id="subid")

    pages = client.dsc_node_configuration.list_by_automation_account(
        resource_group_name="rg",
        automation_account_name="myAutomationAccount33",
    )
    for node_configuration in pages:
        print(node_configuration)


# x-ms-original-file: specification/automation/resource-manager/Microsoft.Automation/stable/2022-08-08/examples/listPagedDscNodeConfigurationsWithNameFilter.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
11dae3fca1d70bbb56b846b34ffee96c891b52d4 | 9887e822ed868a6f2c57e7f1563fa4e114e91aa3 | /account/apis/mod.py | 56f2a5f14413c21c0fdd99dd77b0904771022de1 | [] | no_license | cuijianzhe/studyxing | 5d2f00508447b8d81abbd9d31966d6cdf35640a2 | 2d8bf652e0e1ed83b3078ce74400680fd159f7c1 | refs/heads/main | 2023-03-20T23:02:29.349806 | 2021-02-19T12:21:46 | 2021-02-19T12:21:46 | 328,857,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | from base.api import BaseApi
from account.controllers import mod as mod_ctl
class CreateModApi(BaseApi):
    """POST endpoint that creates a module."""

    need_params = {
        'name': ('名称', 'required str 16'),
        'sign': ('标识', 'required str 16'),
        'rank': ('排序值', 'required int'),
    }

    def post(self, request, params):
        """Create the module and return its serialized data."""
        return mod_ctl.create_mod(**params)
class UpdateModApi(BaseApi):
    """POST endpoint that updates an existing module."""

    need_params = {
        'obj_id': ('模块ID', 'required int'),
        'name': ('名称', 'required str 16'),
        'sign': ('标识', 'required str 16'),
        'rank': ('排序值', 'required int'),
    }

    def post(self, request, params):
        """Apply the update and return the modified module's data."""
        return mod_ctl.update_mod(**params)
class DeleteModApi(BaseApi):
    """POST endpoint that deletes a module by id."""

    need_params = {
        'obj_id': ('模块ID', 'required int'),
    }

    def post(self, request, params):
        """Delete the module and return the controller's result."""
        return mod_ctl.delete_mod(**params)
class ListModApi(BaseApi):
    """POST endpoint that returns a paginated module list.

    Open to all callers (no permission check); all filters are optional.
    """

    NEED_PERMISSION = False
    need_params = {
        'keyword': ('关键词', 'optional str 16'),
        'need_permission': ('是否返回权限', 'optional bool'),
        'page_num': ('页码', 'optional int'),
        'page_size': ('页容量', 'optional int'),
    }

    def post(self, request, params):
        """Return the filtered, paginated modules."""
        return mod_ctl.get_mods(**params)
return data
class ModApi(BaseApi):
    """POST endpoint that returns a single module by id (no permission check)."""

    NEED_PERMISSION = False
    need_params = {
        'obj_id': ('模块ID', 'required int'),
    }

    def post(self, request, params):
        """Fetch one module's data."""
        return mod_ctl.get_mod(**params)
| [
"598941324@qq.com"
] | 598941324@qq.com |
078ea662abba209537305adab7c715e17fc0f377 | 669bde22dcc37e22d554435ec615bc498eb370c7 | /ndb/tests/unit/test_blobstore.py | 32300df49733833c4b6494a2c1835efaf6a801d2 | [
"Apache-2.0"
] | permissive | tjcelaya/google-cloud-python | a375856cd6b37a87e0b07b2c4ee82f65366f484b | c5c556ece3fa8d95c42bf9717e661645ea986e09 | refs/heads/master | 2020-04-02T19:15:04.166745 | 2018-10-25T18:07:27 | 2018-10-25T18:07:27 | 154,728,012 | 0 | 0 | Apache-2.0 | 2018-10-25T19:45:05 | 2018-10-25T19:45:05 | null | UTF-8 | Python | false | false | 5,196 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from google.cloud.ndb import blobstore
from google.cloud.ndb import model
import tests.unit.utils
def test___all__():
    # Verify the module's __all__ matches its public names (project helper).
    tests.unit.utils.verify___all__(blobstore)


# The tests below pin the App Engine blobstore compatibility constants to
# their documented literal values.
def test_BLOB_INFO_KIND():
    assert blobstore.BLOB_INFO_KIND == "__BlobInfo__"


def test_BLOB_KEY_HEADER():
    assert blobstore.BLOB_KEY_HEADER == "X-AppEngine-BlobKey"


def test_BLOB_MIGRATION_KIND():
    assert blobstore.BLOB_MIGRATION_KIND == "__BlobMigration__"


def test_BLOB_RANGE_HEADER():
    assert blobstore.BLOB_RANGE_HEADER == "X-AppEngine-BlobRange"
# Each test below asserts that the corresponding blobstore class/classmethod
# raises NotImplementedError when used.
class TestBlobFetchSizeTooLargeError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobFetchSizeTooLargeError()


class TestBlobInfo:
    # Constructor and every classmethod accessor are expected to raise.
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo()

    @staticmethod
    def test_get():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo.get()

    @staticmethod
    def test_get_async():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo.get_async()

    @staticmethod
    def test_get_multi():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo.get_multi()

    @staticmethod
    def test_get_multi_async():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo.get_multi_async()


class TestBlobInfoParseError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfoParseError()


class TestBlobKey:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobKey()
def test_BlobKeyProperty():
    # BlobKeyProperty is re-exported from the model module; identity must hold.
    assert blobstore.BlobKeyProperty is model.BlobKeyProperty


class TestBlobNotFoundError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobNotFoundError()


class TestBlobReader:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobReader()


# Module-level blobstore functions are also expected to raise
# NotImplementedError when called.
def test_create_upload_url():
    with pytest.raises(NotImplementedError):
        blobstore.create_upload_url()


def test_create_upload_url_async():
    with pytest.raises(NotImplementedError):
        blobstore.create_upload_url_async()


class TestDataIndexOutOfRangeError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.DataIndexOutOfRangeError()


def test_delete():
    with pytest.raises(NotImplementedError):
        blobstore.delete()


def test_delete_async():
    with pytest.raises(NotImplementedError):
        blobstore.delete_async()


def test_delete_multi():
    with pytest.raises(NotImplementedError):
        blobstore.delete_multi()


def test_delete_multi_async():
    with pytest.raises(NotImplementedError):
        blobstore.delete_multi_async()
class TestError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.Error()


def test_fetch_data():
    with pytest.raises(NotImplementedError):
        blobstore.fetch_data()


def test_fetch_data_async():
    with pytest.raises(NotImplementedError):
        blobstore.fetch_data_async()


# The get* module functions must alias the BlobInfo classmethods.
def test_get():
    # NOTE: `is` identity doesn't work for class methods
    assert blobstore.get == blobstore.BlobInfo.get


def test_get_async():
    # NOTE: `is` identity doesn't work for class methods
    assert blobstore.get_async == blobstore.BlobInfo.get_async


def test_get_multi():
    # NOTE: `is` identity doesn't work for class methods
    assert blobstore.get_multi == blobstore.BlobInfo.get_multi


def test_get_multi_async():
    # NOTE: `is` identity doesn't work for class methods
    assert blobstore.get_multi_async == blobstore.BlobInfo.get_multi_async


class TestInternalError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.InternalError()


def test_MAX_BLOB_FETCH_SIZE():
    # Pin the documented maximum fetch size constant.
    assert blobstore.MAX_BLOB_FETCH_SIZE == 1015808


def test_parse_blob_info():
    with pytest.raises(NotImplementedError):
        blobstore.parse_blob_info()


class TestPermissionDeniedError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.PermissionDeniedError()


def test_UPLOAD_INFO_CREATION_HEADER():
    assert (
        blobstore.UPLOAD_INFO_CREATION_HEADER == "X-AppEngine-Upload-Creation"
    )
| [
"daniel.j.hermes@gmail.com"
] | daniel.j.hermes@gmail.com |
ca297a7bed2d89938013edc7d3a4db94fad6d480 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_bordellos.py | 51b17a85a0c35d1b954d805cfb25114d8a345af1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.nouns._bordello import _BORDELLO
#calss header
class _BORDELLOS(_BORDELLO, ):
def __init__(self,):
_BORDELLO.__init__(self)
self.name = "BORDELLOS"
self.specie = 'nouns'
self.basic = "bordello"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
abef8cf8af1057302a68b98d1e65ff28cc9e385c | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ControlRegions/DY/2016noHIPM_v9/configuration_noJER.py | d4274642345e2a5a4144a73e3f4b81a4d0d12afa | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 1,152 | py | # Configuration file to produce initial root files -- has both merged and binned ggH samples
# Name of the input TTree read from the ROOT files.
treeName = 'Events'

# Tag identifying this configuration; also used to name output directories.
tag = 'DY_2016noHIPM_v9_noJER'

# used by mkShape to define output directory for root files
outputDir = 'rootFile_noJER'

# file with TTree aliases
aliasesFile = 'aliases.py'

# file with list of variables
variablesFile = 'variables.py'

# file with list of cuts
cutsFile = 'cuts.py'

# file with list of samples
samplesFile = 'samples_noJER.py'

# file with list of plots (DY-only plotting)
plotFile = 'plot_DYonly.py'

# luminosity to normalize to (in 1/fb)
# https://github.com/latinos/LatinoAnalysis/blob/UL_production/NanoGardener/python/data/TrigMaker_cfg.py#L239 (#311 #377 #445)
# 0.418771191 + 7.653261227 + 7.866107374 + 0.8740119304 = 16.8121517224
lumi = 16.81

# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plots_' + tag

# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'

# structure file for datacard
structureFile = 'structure.py'

# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
"nicolo.trevisani@cern.ch"
] | nicolo.trevisani@cern.ch |
60f2c4f523e064094186abbbf27bc387e91fda43 | 3bec37b9145af3381f1bbc55745d3ef193694c46 | /EPI/16_stairs.py | 202bea239bea3ea153ee8f9e9cdb269d5baf4eb5 | [] | no_license | nuria/study | c00fa8776514ba4343d9923a9e61af5482d7454c | 57ddbafc762da7c8756b475f016c92bf391bc370 | refs/heads/master | 2023-08-05T01:00:48.923046 | 2023-07-22T14:54:48 | 2023-07-22T14:54:48 | 7,290,586 | 5 | 20 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | #!usr/local/bin
# destination is n levels up
# we can take from 1 to k events at a time
# this really needs to return 0
# if there is not a possibility
def steps(n, k):
    """Count the distinct ways to climb ``n`` stairs taking 1..``k`` steps at a time.

    Order matters (1+2 and 2+1 are different climbs).  Returns 0 when the
    destination cannot be reached exactly (overshoot, i.e. n < 0); the empty
    climb (n == 0) counts as exactly one way.

    Fixes over the previous version: the base cases were wrong (f(0) must be 1,
    not 0, and f(n, 1) is 1, not n), which e.g. made steps(4, 2) return 3
    instead of the correct 5; the debug prints are removed.
    """
    if n == 0:
        return 1  # exactly one way: take no more steps
    if n < 0:
        return 0  # overshot the top: not a valid climb
    # Branch on the size of the first stride (1..k) and sum the sub-counts.
    return sum(steps(n - i, k) for i in range(1, k + 1))


if __name__ == "__main__":
    print(steps(4, 2))  # expect 5: 1111, 112, 121, 211, 22
| [
"nuria@wikimedia.org"
] | nuria@wikimedia.org |
aa8b21a1467006d2663da2786402889ed314b521 | 0bcca17e5b33b1fe237cdf0ff03c2cd38f2d5361 | /src/tasks/waveunet/training/train_gan.py | 2af721a719535449267267dc07c446ee57ca8b44 | [] | no_license | MattSegal/speech-enhancement | 04915d2b10eab395a33e4299fd4df442676f07de | af986a9a86060ce661e62cc444baf0e6d6757cc9 | refs/heads/master | 2020-08-03T15:55:33.759158 | 2020-01-11T05:10:59 | 2020-01-11T05:10:59 | 211,806,346 | 22 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,054 | py | """
Train WaveUNet on the noisy VCTK dataset using MSE + GAN
Batch size of 32 uses approx 5GB of GPU memory.
Uses NoGAN training schedule
https://github.com/jantic/DeOldify#what-is-nogan
"""
import torch
import torch.nn as nn
from src.datasets import NoisySpeechDataset
from src.utils.loss import LeastSquaresLoss
from src.utils.trainer import Trainer
from ..models.wave_u_net import WaveUNet
from ..models.mel_discriminator import MelDiscriminatorNet
# Checkpointing
WANDB_PROJECT = "wave-u-net"     # Weights & Biases project name
CHECKPOINT_NAME = "wave-u-net"   # prefix used when saving checkpoints

# Training hyperparams
LEARNING_RATE = 1e-4             # generator Adam learning rate
ADAM_BETAS = (0.5, 0.9)          # Adam (beta1, beta2) for both optimizers
WEIGHT_DECAY = 1e-4              # L2 regularization for both optimizers
DISC_WEIGHT = 1e-1               # weight of the GAN loss term during fine-tuning
DISC_LEARNING_RATE = 4 * LEARNING_RATE  # discriminator learns 4x faster than the generator

# Shared MSE criterion used by the loss/metric helpers below.
mse = nn.MSELoss()
def train(num_epochs, use_cuda, batch_size, wandb_name, subsample, checkpoint_epochs):
    """Run the three-phase NoGAN training schedule for WaveUNet.

    Phase 1: train the generator alone with MSE loss.
    Phase 2: train the mel discriminator against the frozen generator.
    Phase 3: fine-tune the generator with MSE + weighted least-squares GAN loss.

    NOTE(review): ``use_cuda`` is accepted but never read in this body —
    presumably device selection happens inside ``Trainer``; confirm.
    """
    trainer = Trainer(num_epochs, wandb_name)
    trainer.setup_checkpoints(CHECKPOINT_NAME, checkpoint_epochs)
    trainer.setup_wandb(
        WANDB_PROJECT,
        wandb_name,
        config={
            "Batch Size": batch_size,
            "Epochs": num_epochs,
            "Adam Betas": ADAM_BETAS,
            "Learning Rate": LEARNING_RATE,
            "Disc Learning Rate": DISC_LEARNING_RATE,
            "Disc Weight": DISC_WEIGHT,
            "Weight Decay": WEIGHT_DECAY,
            "Fine Tuning": False,
        },
    )

    # Construct generator network
    gen_net = trainer.load_net(WaveUNet)
    gen_optimizer = trainer.load_optimizer(
        gen_net,
        learning_rate=LEARNING_RATE,
        adam_betas=ADAM_BETAS,
        weight_decay=WEIGHT_DECAY,
    )
    train_loader, test_loader = trainer.load_data_loaders(
        NoisySpeechDataset, batch_size, subsample
    )

    # Construct discriminator network
    disc_net = trainer.load_net(MelDiscriminatorNet)
    disc_loss = LeastSquaresLoss(disc_net)
    disc_optimizer = trainer.load_optimizer(
        disc_net,
        learning_rate=DISC_LEARNING_RATE,
        adam_betas=ADAM_BETAS,
        weight_decay=WEIGHT_DECAY,
    )

    # First, train generator using MSE loss
    disc_net.freeze()
    gen_net.unfreeze()
    trainer.register_loss_fn(get_mse_loss)
    trainer.register_metric_fn(get_mse_metric, "Loss")
    # Fixed-length 2**15-sample (~2s at 16kHz-ish) audio windows in and out.
    trainer.input_shape = [2 ** 15]
    trainer.target_shape = [2 ** 15]
    trainer.output_shape = [2 ** 15]
    trainer.train(gen_net, num_epochs, gen_optimizer, train_loader, test_loader)

    # Next, train GAN using the output of the generator
    def get_disc_loss(_, fake_audio, real_audio):
        """
        We want to compare the inputs (real audio) with the generated output (fake audio)
        """
        return disc_loss.for_discriminator(real_audio, fake_audio)

    def get_disc_metric(_, fake_audio, real_audio):
        # Same quantity as the loss, detached to a plain float for logging.
        loss_t = disc_loss.for_discriminator(real_audio, fake_audio)
        return loss_t.data.item()

    # Swap which network is trainable: discriminator on, generator frozen.
    disc_net.unfreeze()
    gen_net.freeze()
    # Clear phase-1 loss/metric registrations before re-registering.
    trainer.loss_fns = []
    trainer.metric_fns = []
    trainer.register_loss_fn(get_disc_loss)
    trainer.register_metric_fn(get_disc_metric, "Discriminator Loss")
    trainer.train(gen_net, num_epochs, disc_optimizer, train_loader, test_loader)

    # Finally, train the generator using the discriminator and MSE loss
    def get_gen_loss(_, fake_audio, real_audio):
        return disc_loss.for_generator(real_audio, fake_audio)

    def get_gen_metric(_, fake_audio, real_audio):
        loss_t = disc_loss.for_generator(real_audio, fake_audio)
        return loss_t.data.item()

    # Freeze the discriminator again and fine-tune the generator.
    disc_net.freeze()
    gen_net.unfreeze()
    trainer.loss_fns = []
    trainer.metric_fns = []
    # Combined objective: MSE plus GAN loss down-weighted by DISC_WEIGHT.
    trainer.register_loss_fn(get_mse_loss)
    trainer.register_loss_fn(get_gen_loss, weight=DISC_WEIGHT)
    trainer.register_metric_fn(get_mse_metric, "Loss")
    trainer.register_metric_fn(get_gen_metric, "Generator Loss")
    trainer.train(gen_net, num_epochs, gen_optimizer, train_loader, test_loader)
def get_mse_loss(inputs, outputs, targets):
    """MSE training loss; `inputs` is unused but required by the Trainer's loss-fn signature."""
    loss_t = mse(outputs, targets)
    return loss_t
def get_mse_metric(inputs, outputs, targets):
    """Scalar MSE value for logging; `inputs` is unused but required by the Trainer's metric-fn signature."""
    return mse(outputs, targets).data.item()
| [
"mattdsegal@gmail.com"
] | mattdsegal@gmail.com |
a49f91645ac0984b0ade8317da1c64816a2a1631 | 14d8418ca5990217be67aee89fdaa310db03fbba | /models/device_cluster_alert_config_pagination_response.py | 70c55decff716f5cd0201af6d0a2beef44a7c6ca | [
"Apache-2.0"
] | permissive | sachanta/lm-sdk-python | 3a16457bd2d5b880a0d238a88a9d1d5b8d9675f0 | e476d415c7279457f79b5d032a73d950af2fe96b | refs/heads/master | 2023-08-03T08:39:42.842790 | 2021-09-13T07:20:56 | 2021-09-13T07:20:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,452 | py | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. Note: For Python SDKs, the REQUEST parameters can contain camelCase or an underscore. However, the RESPONSE parameters will always contain an underscore. For example, the REQUEST parameter can be testLocation or test_location. The RESPONSE parameter will be test_location. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.device_cluster_alert_config import DeviceClusterAlertConfig # noqa: F401,E501
class DeviceClusterAlertConfigPaginationResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared swagger type for each model attribute.
    swagger_types = {
        'total': 'int',
        'search_id': 'str',
        'items': 'list[DeviceClusterAlertConfig]'
    }

    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'total': 'total',
        'search_id': 'searchId',
        'items': 'items'
    }

    def __init__(self, total=None, search_id=None, items=None):  # noqa: E501
        """DeviceClusterAlertConfigPaginationResponse - a model defined in Swagger"""  # noqa: E501

        self._total = None
        self._search_id = None
        self._items = None
        self.discriminator = None

        # Only assign attributes that were actually provided, so omitted
        # fields stay None rather than triggering the property setters.
        if total is not None:
            self.total = total
        if search_id is not None:
            self.search_id = search_id
        if items is not None:
            self.items = items

    @property
    def total(self):
        """Gets the total of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501

        :return: The total of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :rtype: int
        """
        return self._total

    @total.setter
    def total(self, total):
        """Sets the total of this DeviceClusterAlertConfigPaginationResponse.

        :param total: The total of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :type: int
        """
        self._total = total

    @property
    def search_id(self):
        """Gets the search_id of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501

        :return: The search_id of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :rtype: str
        """
        return self._search_id

    @search_id.setter
    def search_id(self, search_id):
        """Sets the search_id of this DeviceClusterAlertConfigPaginationResponse.

        :param search_id: The search_id of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :type: str
        """
        self._search_id = search_id

    @property
    def items(self):
        """Gets the items of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501

        :return: The items of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :rtype: list[DeviceClusterAlertConfig]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this DeviceClusterAlertConfigPaginationResponse.

        :param items: The items of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :type: list[DeviceClusterAlertConfig]
        """
        self._items = items

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch for dict-based models; dead code for this
        # class since it subclasses object, not dict.
        if issubclass(DeviceClusterAlertConfigPaginationResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeviceClusterAlertConfigPaginationResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"bamboo@build01.us-west-1.logicmonitor.net"
] | bamboo@build01.us-west-1.logicmonitor.net |
738bdb26a148ff5cd279ad5cd64b547b3bc91b58 | 11334e46d3575968de5062c7b0e8578af228265b | /plib/new/speak.py | ed93af4c89a0cdd34c1bc51ae232d418b5af5777 | [] | no_license | slowrunner/Carl | 99262f16eaf6d53423778448dee5e5186c2aaa1e | 1a3cfb16701b9a3798cd950e653506774c2df25e | refs/heads/master | 2023-06-08T05:55:55.338828 | 2023-06-04T02:39:18 | 2023-06-04T02:39:18 | 145,750,624 | 19 | 2 | null | 2023-06-04T02:39:20 | 2018-08-22T18:59:34 | Roff | UTF-8 | Python | false | false | 4,399 | py | #!/usr/bin/python3
#
# speak.py Speaker utilities
# includes protection from quotes and apostrophes in phrase
# removes asterisks
# observes quietTime from 11PM until 10AM
#
# includes optional vol parameter (range 10-500 useful)
# includes optional ignore (quietTime) parameter
# Oct2019: increased volume for MonkMakes Amplified Speaker
# reduced speed to 150wpm (default was 175)
# switched to espeak-ng (supported, better quality)
# say( phrase, vol=125, anytime=False)
# whisper( phrase, vol= 50, anytime=True)
# shout( phrase, vol=250, anytime=False)
import subprocess
import sys
sys.path.append('/home/pi/Carl/plib')
import runLog
import time
debug = False
import math
def quietTime(startOK=10, notOK=23, ignore=False):
    """Return True when speech should be suppressed.

    Quiet time is any hour outside the half-open range [startOK, notOK) --
    by default before 10 AM and from 11 PM onward.  When ignore is True
    there is never a quiet time.
    """
    now = time.localtime()
    if debug:
        print("time.localtime().tm_hour():", now.tm_hour)
        print("startOK: {} notOK: {}".format(startOK, notOK))
    if ignore:
        return False
    return not (startOK <= now.tm_hour < notOK)
# used when espeak was broke
def say_flite(phrase,vol=100,anytime=False):
    """Speak `phrase` via flite synthesis played through omxplayer.

    vol is a rough percentage (whisper ~35-40, normal ~80, shout 100-150);
    it is mapped logarithmically to omxplayer's millibel scale below.
    anytime=True bypasses the quietTime() check.
    """
    # Sanitize: the phrase is interpolated into shell commands, so strip
    # quotes/apostrophes/asterisks that would break (or escape) the command.
    phrase = phrase.replace("I'm","I m")
    phrase = phrase.replace("'","")
    phrase = phrase.replace('"',' quote ')
    phrase = phrase.replace('*',"")
    # flite volume is double millibels from 0 to -6000
    # whisper should be around 35-40%
    # say/normal volume is around 80
    # shout is like 100 to 150, distorts at 170
    YYY = int(2000 * (math.log(int(vol)/100.0)))
    if (quietTime(ignore=anytime)):
        print("QuietTime speak request: {} at vol: {}".format(phrase,vol))
    else:
        try:
            # SECURITY NOTE(review): phrase is embedded in a shell string with
            # shell=True; despite the stripping above, untrusted input could
            # inject shell syntax. Prefer argv lists without shell=True.
            subprocess.check_output(['flite -t "%s" -o tmp.wav' % phrase], stderr=subprocess.STDOUT, shell=True)
            subprocess.check_output(['omxplayer --vol "%d" tmp.wav' % YYY], stderr=subprocess.STDOUT, shell=True)
            subprocess.check_output(['rm tmp.wav'], stderr=subprocess.STDOUT, shell=True)
        except KeyboardInterrupt:
            # Allow Ctrl-C during playback to terminate the whole program.
            sys.exit(0)
# Speak a phrase using espeak
# Options: vol: 10 is whisper, 50 is "normal Carl", 200 is shouting, 500 is screaming
#          anytime: True means ignore quietTime check
def say_espeak(phrase,vol=100,anytime=False):
    """Speak `phrase` with espeak-ng at amplitude `vol`, honoring quiet time unless anytime=True."""
    # Sanitize: the phrase is interpolated into a shell command, so strip
    # quotes/apostrophes/asterisks that would break (or escape) the command.
    phrase = phrase.replace("I'm","I m")
    phrase = phrase.replace("'","")
    phrase = phrase.replace('"',' quote ')
    phrase = phrase.replace('*',"")
    # subprocess.check_output(['espeak -ven+f3 -s200 "%s"' % phrase], stderr=subprocess.STDOUT, shell=True)
    if (quietTime(ignore=anytime)):
        print("QuietTime speak request: {} at vol: {}".format(phrase,vol))
    else:
        # SECURITY NOTE(review): phrase is embedded in a shell string with
        # shell=True; untrusted input could inject shell syntax.
        # subprocess.check_output(['espeak -ven-us+f5 -a'+str(vol)+' "%s"' % phrase], stderr=subprocess.STDOUT, shell=True)
        subprocess.check_output(['espeak-ng -s150 -ven-us+f5 -a'+str(vol)+' "%s"' % phrase], stderr=subprocess.STDOUT, shell=True)
def say(phrase, vol=125, anytime=False):
    """Speak `phrase` at normal volume (default 125) via espeak-ng."""
    say_espeak(phrase, vol, anytime)
def shout(phrase, vol=250, anytime=False):
    """Speak `phrase` loudly (default volume 250) via espeak-ng."""
    say_espeak(phrase, vol, anytime)
def whisper(phrase, vol=30, anytime=False):
    """Speak `phrase` quietly (default volume 30) via espeak-ng."""
    say_espeak(phrase, vol, anytime)
# ##### MAIN ####
@runLog.logRun
def main():
    # CLI usage: speak.py "phrase" [vol] [ignoreQuietTime("True")]
    # With no arguments, runs a self-test of say/whisper/shout.
    global debug
    # say("hello from speak dot p y test main")
    # say_espeak("whats the weather, long quiet?")
    if (len(sys.argv) >1):
        strToSay = sys.argv[1]
        if ( len(sys.argv)>2 ):
            vol=int(sys.argv[2])
        else:
            # Default CLI volume (note: lower than say()'s own default of 125)
            vol=50
        if ( len(sys.argv)>3 ):
            # Third arg must be exactly the string "True" to bypass quiet time
            ignore= ( sys.argv[3] == "True" )
        else:
            ignore=False
        say(strToSay,vol,ignore)
    else:
        # Self-test mode: enable debug tracing inside quietTime()
        debug = True
        say("Just saying. This phrase contained an apostrophe which isn't allowed")
        whisper('I need to whisper. This phrase contains "a quoted word" ')
        shout("I feel like shouting. My name is Carl. ")
        whisper("Whisper at 20. I don't know Pogo. Never met the little bot",20,True)


if __name__ == "__main__":
    main()
| [
"slowrunner@users.noreply.github.com"
] | slowrunner@users.noreply.github.com |
400a7765748d145a2e7be58f1fa69798b3b9e1b3 | 717f5324f8d4ce44a94e2c0b654a2d2a4f0a3c74 | /dwi_ml/training/utils/monitoring.py | 3b7235fdf71fa311640ad471f932599b25d9c149 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jhlegarreta/dwi_ml | 3ac7ef28f3bba13f34a8f38a9f910cf2946dcc7b | a7c03f26780677e4eaccff9b381d5a8ec6120293 | refs/heads/master | 2023-03-04T07:22:15.737775 | 2023-02-23T18:20:53 | 2023-02-23T18:20:53 | 242,525,253 | 0 | 0 | MIT | 2020-06-10T00:53:06 | 2020-02-23T13:49:01 | Python | UTF-8 | Python | false | false | 7,185 | py | # -*- coding: utf-8 -*-
from collections import deque
import timeit
from datetime import datetime
from typing import List, Tuple
import numpy as np
class TimeMonitor(object):
    """Tracks the wall-clock duration, in minutes, of each training epoch."""

    def __init__(self):
        self.epoch_durations = []
        self._start_time = None

    def start_new_epoch(self):
        """Record the starting timestamp of a new epoch."""
        self._start_time = datetime.now()

    def end_epoch(self):
        """Close the current epoch and append its duration (minutes)."""
        started = self._start_time
        if started is None:
            raise ValueError("You should not end the epoch; it has not "
                             "started (or you haven't told the TimeMonitor).")
        elapsed = datetime.now() - started
        # Store the duration in minutes.
        self.epoch_durations.append(elapsed.total_seconds() / 60)
        self._start_time = None
class BatchHistoryMonitor(object):
    """ History of some value for each iteration during training, and mean
    value for each epoch.

    Example of usage: History of the loss during training.
        loss_monitor = ValueHistoryMonitor()
        ...
        loss_monitor.start_new_epoch()

        # Call update at each iteration
        loss_monitor.update(2.3)
        ...
        loss_monitor.end_epoch()  # call at epoch end
        ...
        loss_monitor.epochs_means  # returns the loss curve as a list
    """

    def __init__(self, weighted: bool = False):
        """
        Parameters
        ----------
        weighted: bool
            If True, update() expects a weight and end_epoch() computes a
            weighted average instead of a plain mean.
        """
        self.is_weighted = weighted

        # State:
        self.current_batch_values = []
        self.current_batch_weights = []
        self.average_per_epoch = []
        self.current_epoch = -1

    def update(self, value, weight=None):
        """
        Note. Does not save the update if value is inf.

        Parameters
        ----------
        value: The value to be monitored.
        weight: The weight in the average. For instance, for a loss monitor,
            you should measure the loss average.
        """
        if np.isinf(value):
            return

        self.current_batch_values.append(value)
        if self.is_weighted:
            self.current_batch_weights.append(weight)

    def start_new_epoch(self):
        # Sanity check: the previous epoch must have been closed with
        # end_epoch() before a new one starts.
        assert len(self.average_per_epoch) == self.current_epoch + 1, \
            "Did you forget to end previous epoch? Number of epoch values " \
            "is {} but monitor's current epoch is {}" \
            .format(len(self.average_per_epoch), self.current_epoch)

        self.current_epoch += 1
        self.current_batch_values = []
        self.current_batch_weights = []

    def end_epoch(self):
        """
        Compute mean of current epoch and add it to means values.
        """
        if not self.is_weighted:
            mean_value = np.mean(self.current_batch_values)
        else:
            # Weighted average: sum(value_i * weight_i) / sum(weight_i).
            mean_value = sum(np.multiply(self.current_batch_values,
                                         self.current_batch_weights))
            mean_value /= sum(self.current_batch_weights)

        self.average_per_epoch.append(mean_value)

    def get_state(self):
        # Not saving current batch values. Checkpoints should be saved only at
        # the end of epochs.
        return {'average_per_epoch': self.average_per_epoch,
                'current_epoch': self.current_epoch,
                'is_weighted': self.is_weighted,
                }

    def set_state(self, state):
        self.average_per_epoch = state['average_per_epoch']
        self.current_epoch = state['current_epoch']
        # BUGFIX: is_weighted is saved by get_state() but was previously not
        # restored, so a weighted monitor reloaded from a checkpoint silently
        # became unweighted. Use .get() so older checkpoints (without the key)
        # still load and keep the constructor's setting.
        self.is_weighted = state.get('is_weighted', self.is_weighted)
class BestEpochMonitoring(object):
    """
    Tracks the best (lowest) loss seen so far and counts consecutive epochs
    without sufficient improvement, to support early stopping.
    """

    def __init__(self, patience: int, min_eps: float = 1e-6):
        """
        Parameters
        -----------
        patience: int
            Maximal number of bad epochs we allow.
        min_eps: float, optional
            Minimum decrease of the loss for an epoch to count as improving.
        """
        self.patience = patience
        self.min_eps = min_eps

        self.best_value = None
        self.best_epoch = None
        self.n_bad_epochs = None

    def update(self, loss, epoch):
        """
        Record this epoch's loss.

        Parameters
        ----------
        loss : float
            Loss value for a new training epoch
        epoch : int
            Current epoch

        Returns
        -------
        is_bad: bool
            True if this epoch was a bad epoch (no sufficient improvement).
        """
        is_first = self.best_value is None
        if is_first or loss < self.best_value - self.min_eps:
            # New best value (or very first epoch): reset the bad-epoch count.
            self.best_value = loss
            self.best_epoch = epoch
            self.n_bad_epochs = 0
            return False

        # Did not improve by at least min_eps.
        self.n_bad_epochs += 1
        return True

    @property
    def is_patience_reached(self):
        """
        Returns
        -------
        True if the number of epochs without improvements is more than the
        patience.
        """
        return self.n_bad_epochs >= self.patience

    def get_state(self):
        """ Get object state """
        return {'patience': self.patience,
                'min_eps': self.min_eps,
                'best_value': self.best_value,
                'best_epoch': self.best_epoch,
                'n_bad_epochs': self.n_bad_epochs}

    def set_state(self, state):
        """ Set object state """
        self.patience = state['patience']
        self.min_eps = state['min_eps']
        self.best_value = state['best_value']
        self.best_epoch = state['best_epoch']
        self.n_bad_epochs = state['n_bad_epochs']
class EarlyStoppingError(Exception):
    """Raised when an experiment is halted by the early-stopping criterion.

    Attributes:
        message: explanation of why early stopping occurred.
    """

    def __init__(self, message):
        self.message = message
class IterTimer(object):
    """Wraps an iterable and keeps a rolling window of per-iteration times.

    Hint: After each iteration, you can check that the maximum allowed time
    has not been reached by using:

    # Ex: To check that time remaining is less than one iter + 30 seconds
    time.time() + iter_timer.mean + 30 > max_time

    # Ex: To allow some incertainty. Ex: prevent continuing in the case the
    # next iter could be twice as long as usual:
    time.time() + iter_timer.mean * 2.0 + 30 > max_time
    """

    def __init__(self, history_len=5):
        # Only the last `history_len` iteration durations are kept.
        self.history = deque(maxlen=history_len)
        self.iterable = None
        self.start_time = None

    def __call__(self, iterable):
        self.iterable = iter(iterable)
        return self

    def __iter__(self):
        return self

    def __next__(self):
        previous_start = self.start_time
        if previous_start is not None:
            # Record how long the consumer spent on the previous item.
            self.history.append(timeit.default_timer() - previous_start)
        self.start_time = timeit.default_timer()
        return next(self.iterable)

    @property
    def mean(self):
        """Average duration of the recorded iterations (0 when empty)."""
        if not self.history:
            return 0
        return np.mean(self.history)
"emmanuelle.renauld@usherbrooke.ca"
] | emmanuelle.renauld@usherbrooke.ca |
09b6cea22c66de2eaeb07ff6a791649fafb6f042 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/swaggeraemosgi/model/org_apache_sling_engine_parameters_properties.py | 8ef1802dac6ba31c137146f76e0126c5364e7ed7 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 8,829 | py | """
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0-pre.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from swaggeraemosgi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the model's dependencies on first use (avoids circular imports)
    and expose them at module level for the generated type declarations."""
    from swaggeraemosgi.model.config_node_property_boolean import ConfigNodePropertyBoolean
    from swaggeraemosgi.model.config_node_property_integer import ConfigNodePropertyInteger
    from swaggeraemosgi.model.config_node_property_string import ConfigNodePropertyString
    globals().update(
        ConfigNodePropertyBoolean=ConfigNodePropertyBoolean,
        ConfigNodePropertyInteger=ConfigNodePropertyInteger,
        ConfigNodePropertyString=ConfigNodePropertyString,
    )
class OrgApacheSlingEngineParametersProperties(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted attributes on this model.
    allowed_values = {
    }

    # No value validations on this model.
    validations = {
    }

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'sling_default_parameter_encoding': (ConfigNodePropertyString,),  # noqa: E501
            'sling_default_max_parameters': (ConfigNodePropertyInteger,),  # noqa: E501
            'file_location': (ConfigNodePropertyString,),  # noqa: E501
            'file_threshold': (ConfigNodePropertyInteger,),  # noqa: E501
            'file_max': (ConfigNodePropertyInteger,),  # noqa: E501
            'request_max': (ConfigNodePropertyInteger,),  # noqa: E501
            'sling_default_parameter_check_for_additional_container_parameters': (ConfigNodePropertyBoolean,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model is not polymorphic.
        return None

    # Python attribute name -> JSON key in the OSGI configuration payload.
    attribute_map = {
        'sling_default_parameter_encoding': 'sling.default.parameter.encoding',  # noqa: E501
        'sling_default_max_parameters': 'sling.default.max.parameters',  # noqa: E501
        'file_location': 'file.location',  # noqa: E501
        'file_threshold': 'file.threshold',  # noqa: E501
        'file_max': 'file.max',  # noqa: E501
        'request_max': 'request.max',  # noqa: E501
        'sling_default_parameter_check_for_additional_container_parameters': 'sling.default.parameter.checkForAdditionalContainerParameters',  # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes that must never be treated as model
    # properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """OrgApacheSlingEngineParametersProperties - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            sling_default_parameter_encoding (ConfigNodePropertyString): [optional]  # noqa: E501
            sling_default_max_parameters (ConfigNodePropertyInteger): [optional]  # noqa: E501
            file_location (ConfigNodePropertyString): [optional]  # noqa: E501
            file_threshold (ConfigNodePropertyInteger): [optional]  # noqa: E501
            file_max (ConfigNodePropertyInteger): [optional]  # noqa: E501
            request_max (ConfigNodePropertyInteger): [optional]  # noqa: E501
            sling_default_parameter_check_for_additional_container_parameters (ConfigNodePropertyBoolean): [optional]  # noqa: E501
        """

        # Pop the framework control arguments before treating the remaining
        # kwargs as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Positional arguments are not supported by generated models.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
e2ad5a6ddf46c76e3e62446828c50a10623f0847 | 7a704e838d89f942a1099fec141f1fbe9828e528 | /hysia/core/monitor/monitor.py | edea0e3a6e17a14265ce4c732512112661249259 | [
"Apache-2.0"
] | permissive | cap-ntu/Video-to-Retail-Platform | 3ee00d22b7fd94925adac08c5ea733ee647f4574 | 757c68d9de0778e3da8bbfa678d89251a6955573 | refs/heads/hysia_v2 | 2023-02-14T05:22:16.792928 | 2021-01-10T02:31:43 | 2021-01-10T02:31:43 | 212,741,650 | 63 | 20 | Apache-2.0 | 2021-01-10T02:32:00 | 2019-10-04T05:22:08 | Python | UTF-8 | Python | false | false | 7,021 | py | # Desc: Monitor to keep track of system statistics including CPU, GPU, memory, process and network.
# Author: Zhou Shengsheng
# Date: 24/04/19
# References:
# (1) Get cpu info:
# * py-cpuinfo: https://github.com/workhorsy/py-cpuinfo
# * psutil: https://github.com/giampaolo/psutil
# (2) Get gpu info:
# * gputil: https://github.com/ZhouShengsheng/gputil
# install cmd: pip install git+git://github.com/ZhouShengsheng/gputil.git@master
# (3) Get memory, process and network stats:
# * psutil: https://github.com/giampaolo/psutil
import time
import os
import cpuinfo
import psutil
import GPUtil
from .sys_stat import SysStat
from .cpu_stat import CPUStat
from .memory_stat import MemoryStat
from .gpu_stat import GPUStat
from .process_stat import *
from .gpu_process_stat import GPUProcessStat
from .network_stat import *
class Monitor(object):
    """Monitor to keep track of system statistics including CPU, GPU, memory, process and network."""

    def __init__(self):
        # Query and cache cpu static stat (model/count/freqs/cache never change)
        self.__cachedCPUStaticStat = self.__queryCPUStaticStat()
        # Create sys stat (reused and refreshed on every getSysStat() call)
        self.__sysStat = SysStat()

    def getSysStat(self):
        """
        Get system statistics. This function will always get the latest system stats.

        Returns:
            sysStat (SysStat): System statistics.
        """
        sysStat = self.__sysStat
        sysStat.upTime = time.time() - psutil.boot_time()
        sysStat.cpuStat = self.__queryCPUStat()
        sysStat.memoryStat = self.__queryMemoryStat()
        sysStat.gpuStats = self.__queryGPUStats()
        sysStat.gpuCount = len(sysStat.gpuStats)
        sysStat.processStat, sysStat.processStats = self.__queryProcessStats()
        sysStat.processCount = len(sysStat.processStats)
        sysStat.gpuProcessStats = self.__queryGPUProcessStats()
        sysStat.gpuProcessCount = len(sysStat.gpuProcessStats)
        sysStat.networkStats = self.__queryNetworkStats()
        sysStat.networkCount = len(sysStat.networkStats)
        return self.__sysStat

    def __queryCPUStaticStat(self):
        """
        Query cpu static stat.

        Returns:
            cpuStaticStat (tuple): CPU static statistics:
                (model, count, freqs, cache).
        """
        cpuInfo = cpuinfo.get_cpu_info()
        model = cpuInfo['brand']
        count = cpuInfo['count']
        # Cache sizes are reported as strings like "32 KB"; take the number.
        # NOTE(review): this sums L1d+L1i+L2+L3 without unit normalization —
        # assumes all levels are reported in the same unit; confirm.
        extractFloat = lambda s: float(s.split()[0])
        cache = (extractFloat(cpuInfo['l1_data_cache_size']) +
                 extractFloat(cpuInfo['l1_instruction_cache_size']) +
                 extractFloat(cpuInfo['l2_cache_size']) +
                 extractFloat(cpuInfo['l3_cache_size']))
        freqs = psutil.cpu_freq()
        # (current, min, max) frequency in MHz
        freqs = (freqs[0], freqs[1], freqs[2])
        return (model, count, freqs, cache)

    def __queryCPUStat(self):
        """
        Query cpu stat.

        Returns:
            cpuStat (CPUStat): CPU statistics.
        """
        cpuStaticStat = self.__cachedCPUStaticStat
        loads = os.getloadavg()
        # psutil reports percentages 0-100; normalize to 0-1 ratios.
        utilization = psutil.cpu_percent() / 100.
        cpuTimes = tuple(psutil.cpu_times())
        cpuTimesRatio = tuple(x / 100. for x in psutil.cpu_times_percent())
        return CPUStat(cpuStaticStat[0], cpuStaticStat[1], cpuStaticStat[2], cpuStaticStat[3],
                       loads, utilization, cpuTimes, cpuTimesRatio)

    def __queryMemoryStat(self):
        """
        Query memory stat.

        Returns:
            memoryStat (MemoryStat): Memory statistics.
        """
        vm = psutil.virtual_memory()
        swap = psutil.swap_memory()
        # NOTE(review): indices 7-9 of virtual_memory (buffers/cached/shared on
        # Linux) are platform-specific; this mapping assumes Linux field order.
        return MemoryStat(vm[0], vm[1], vm[2] / 100., vm[3], vm[4], vm[7], vm[8], vm[9],
                          swap[0], swap[1], swap[2], swap[3] / 100.)

    def __queryGPUStats(self):
        """
        Query stats for all GPUs.

        Returns:
            gpuStats (list): GPU statistics.
        """
        gpus = GPUtil.getGPUs()
        if gpus:
            return [GPUStat(gpu.id, gpu.uuid, gpu.name, gpu.memoryTotal, gpu.memoryUsed,
                            gpu.memoryFree, gpu.memoryUtil, gpu.temperature) for gpu in gpus]
        # No GPU available (or no NVIDIA driver).
        return []

    def __queryProcessStats(self):
        """
        Query stats for all processes.

        Returns:
            (selfStat, processStats): stat of the current process, and the
            list of stats for every process on the machine.
        """
        selfStat = None
        stats = []
        # Get current pid
        pid = os.getpid()
        # Iterate over all processes
        for proc in psutil.process_iter():
            try:
                pinfo = proc.as_dict()
            except psutil.NoSuchProcess:
                # Process exited between listing and querying; skip it.
                pass
            else:
                cpuTimes = pinfo['cpu_times']
                cpuTimes = (cpuTimes.user, cpuTimes.system,
                            cpuTimes.children_user, cpuTimes.children_system)
                memoryInfo = pinfo['memory_info']
                # NOTE(review): shared/text/lib/data/dirty fields are
                # Linux-only in psutil; this will fail elsewhere.
                memoryInfo = (memoryInfo.rss, memoryInfo.vms, memoryInfo.shared,
                              memoryInfo.text, memoryInfo.lib, memoryInfo.data, memoryInfo.dirty)
                # NOTE(review): proc.status() is called outside the try block
                # and can itself raise NoSuchProcess for a racing process.
                status = ProcessStatus.fromPsutil(proc.status())
                ctxSwitches = (pinfo['num_ctx_switches'].voluntary, pinfo['num_ctx_switches'].involuntary)
                fdCount = pinfo['num_fds'] if pinfo['num_fds'] else 0
                threadCount = pinfo['num_threads'] if pinfo['num_threads'] else 0
                stat = ProcessStat(pinfo['pid'], pinfo['name'], pinfo['ppid'], cpuTimes,
                                   pinfo['cpu_percent'] / 100., memoryInfo, status, pinfo['nice'],
                                   pinfo['ionice'].value, ctxSwitches, fdCount, threadCount)
                # Remember the stat belonging to this very process.
                if not selfStat and pid == stat.pid:
                    selfStat = stat
                stats.append(stat)
        return selfStat, stats

    def __queryGPUProcessStats(self):
        """
        Query stats for all GPU processes.

        Returns:
            gpuProcessStats (list): GPU process statistics.
        """
        processes = GPUtil.getGPUProcesses()
        if processes:
            return [GPUProcessStat(proc.pid, proc.processName, proc.gpuId, proc.gpuUuid,
                                   proc.usedMemory) for proc in processes]
        return []

    def __queryNetworkStats(self):
        """
        Query per-NIC statistics (link state, speed, MTU and addresses).

        Returns:
            networkStats (list): one NetworkStat per network interface.
        """
        ifStatDict = psutil.net_if_stats()
        if not ifStatDict:
            return []
        ifAddrDict = psutil.net_if_addrs()
        stats = []
        for nic, ifStat in ifStatDict.items():
            stat = NetworkStat()
            stat.nic = nic
            stat.isUp = ifStat.isup
            stat.duplex = NicDuplexType.fromPsutil(ifStat.duplex)
            stat.speed = ifStat.speed
            stat.mtu = ifStat.mtu
            # Collect every address (IPv4/IPv6/MAC) bound to this interface.
            ifAddrs = ifAddrDict[nic]
            addrs = []
            for ifAddr in ifAddrs:
                addrs.append((ifAddr.family, ifAddr.address, ifAddr.netmask,
                              ifAddr.broadcast, ifAddr.ptp))
            stat.addrs = addrs
            stats.append(stat)
        return stats
| [
"huaizhen001@e.ntu.edu.sg"
] | huaizhen001@e.ntu.edu.sg |
dc7e70ccb0606c7f9908388b7da170fab6cdf779 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/goodweather/testcase/firstcases/testcase9_012.py | c77b226b597af302f03c194e9841e8638279fae3 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,771 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium capabilities for the GoodWeather app under test: Android 4.4
# emulator, launched at MainActivity, with Jacoco instrumentation enabled
# for coverage collection; noReset keeps app data between sessions.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'org.asdtm.goodweather',
    'appActivity' : 'org.asdtm.goodweather.MainActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'org.asdtm.goodweather/org.asdtm.goodweather.JacocoInstrumentation',
    'noReset' : True
}
def command(cmd, timeout=5):
    """Launch `cmd` in a shell, give it `timeout` seconds, then terminate it.

    Output is captured (and discarded); always returns None.
    """
    proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    proc.terminate()
    return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase012
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
driver.press_keycode(4)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"9_012\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.asdtm.goodweather'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
| [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
9193ed0b958e728f1231e0f2ded9824e6c27ba8e | a35dabf440ca3818ed816f8df86f7cd0f79cca4a | /regulations/tests/templatetags_in_context_tests.py | 082b89892bdf6d50c80749b47b87ee16e9ffbd4f | [
"CC0-1.0"
] | permissive | DalavanCloud/regulations-site | 8b7afba8d46c313a7ff06bb6b3778e8ad5516b11 | 0ed37754a8025b6e7d631cf482e987600a6c884b | refs/heads/master | 2020-04-25T06:42:15.255566 | 2018-12-03T21:08:07 | 2018-12-03T21:08:07 | 172,589,907 | 1 | 0 | NOASSERTION | 2019-02-25T21:43:45 | 2019-02-25T21:43:45 | null | UTF-8 | Python | false | false | 1,729 | py | from unittest import TestCase
from django.template import Context, Template
class TemplatetagsInContextTest(TestCase):
def test_in_context(self):
text = "{% load in_context %}"
text += "1. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}\n"
text += "{% begincontext c1 %}\n"
text += "2. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}\n"
text += "{% endcontext %}{% begincontext c1 c2 %}\n"
text += "3. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}\n"
text += "{% begincontext c2a %}\n"
text += "4. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}\n"
text += "{% endcontext %}{% endcontext %}\n"
text += "5. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}"
context = {'f1': 'f1',
'c1': {'f2': 'c1.f2', 'f1': 'c1.f1'},
'c2': {'f2': 'c2.f2',
'f3': 'c2.f3', 'c2a': {'f4': 'c2a.f4'}}}
output = Template(text).render(Context(context))
lines = output.split("\n")
self.assertEqual("1. f1", lines[0])
self.assertEqual("2. c1.f1c1.f2", lines[2])
self.assertEqual("3. c1.f1c2.f2c2.f3", lines[4])
self.assertEqual("4. c2a.f4", lines[6])
self.assertEqual("5. f1", lines[8])
def test_in_context_cascade(self):
"""Make sure fields that are not dicts get passed along"""
text = "{% load in_context %}{% begincontext c1 f2 %}"
text += "{{ f1 }}{{ f2 }}\n"
text += "{% endcontext %}"
text += "{{ f1 }}{{ f2 }}"
context = {'f1': 'f1', 'f2': 'f2', 'c1': {'f1': 'c1.f1'}}
output = Template(text).render(Context(context))
lines = output.split("\n")
self.assertEqual("c1.f1f2", lines[0])
self.assertEqual("f1f2", lines[1])
| [
"cm.lubinski@gmail.com"
] | cm.lubinski@gmail.com |
9025cb04bd40e802de1d21fdabdad114b39aff51 | 04f83aab47940b739f13c1ba102c230372966c43 | /EDSHyFT/test/SUSHyFT/das.py | e8312edfc42b3afc955558f08ff23eefa586aa47 | [] | no_license | PerilousApricot/SUSHyFT-Analyzer | 5a11909963d30c8ad7f19f499253a6753e78608a | 9f5ba528a96203459c52a0434b32311a16e2ff3b | refs/heads/master | 2016-09-15T15:31:30.617286 | 2016-03-14T20:32:09 | 2016-03-14T21:02:28 | 21,915,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,988 | py | #!/usr/bin/env python2.6
#pylint: disable-msg=C0301,C0103,R0914,R0903
"""
DAS command line tool
"""
__author__ = "Valentin Kuznetsov"
import sys
if sys.version_info < (2, 6):
raise Exception("DAS requires python 2.6 or greater")
import os
import re
import time
import json
import urllib
import urllib2
import httplib
from optparse import OptionParser
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
"""
Simple HTTPS client authentication class based on provided
key/ca information
"""
def __init__(self, key=None, cert=None, level=0):
if level:
urllib2.HTTPSHandler.__init__(self, debuglevel=1)
else:
urllib2.HTTPSHandler.__init__(self)
self.key = key
self.cert = cert
def https_open(self, req):
"""Open request method"""
#Rather than pass in a reference to a connection class, we pass in
# a reference to a function which, for all intents and purposes,
# will behave as a constructor
return self.do_open(self.get_connection, req)
def get_connection(self, host, timeout=300):
"""Connection method"""
if self.key:
return httplib.HTTPSConnection(host, key_file=self.key,
cert_file=self.cert)
return httplib.HTTPSConnection(host)
class DASOptionParser:
"""
DAS cache client option parser
"""
def __init__(self):
usage = "Usage: %prog [options]\n"
usage += "For more help please visit https://cmsweb.cern.ch/das/faq"
self.parser = OptionParser(usage=usage)
self.parser.add_option("-v", "--verbose", action="store",
type="int", default=0, dest="verbose",
help="verbose output")
self.parser.add_option("--query", action="store", type="string",
default=False, dest="query",
help="specify query for your request")
msg = "host name of DAS cache server, default is https://cmsweb.cern.ch"
self.parser.add_option("--host", action="store", type="string",
default='https://cmsweb.cern.ch', dest="host", help=msg)
msg = "start index for returned result set, aka pagination,"
msg += " use w/ limit (default is 0)"
self.parser.add_option("--idx", action="store", type="int",
default=0, dest="idx", help=msg)
msg = "number of returned results (default is 10),"
msg += " use --limit=0 to show all results"
self.parser.add_option("--limit", action="store", type="int",
default=10, dest="limit", help=msg)
msg = 'specify return data format (json or plain), default plain.'
self.parser.add_option("--format", action="store", type="string",
default="plain", dest="format", help=msg)
msg = 'query waiting threshold in sec, default is 5 minutes'
self.parser.add_option("--threshold", action="store", type="int",
default=300, dest="threshold", help=msg)
msg = 'specify private key file name'
self.parser.add_option("--key", action="store", type="string",
default="", dest="ckey", help=msg)
msg = 'specify private certificate file name'
self.parser.add_option("--cert", action="store", type="string",
default="", dest="cert", help=msg)
def get_opt(self):
"""
Returns parse list of options
"""
return self.parser.parse_args()
def convert_time(val):
"Convert given timestamp into human readable format"
if isinstance(val, int) or isinstance(val, float):
return time.strftime('%d/%b/%Y_%H:%M:%S_GMT', time.gmtime(val))
return val
def size_format(uinput):
"""
Format file size utility, it converts file size into KB, MB, GB, TB, PB units
"""
try:
num = float(uinput)
except Exception as _exc:
return uinput
base = 1000. # power of 10, or use 1024. for power of 2
for xxx in ['', 'KB', 'MB', 'GB', 'TB', 'PB']:
if num < base:
return "%3.1f%s" % (num, xxx)
num /= base
def unique_filter(rows):
"""
Unique filter drop duplicate rows.
"""
old_row = {}
row = None
for row in rows:
row_data = dict(row)
try:
del row_data['_id']
del row_data['das']
del row_data['das_id']
del row_data['cache_id']
except:
pass
old_data = dict(old_row)
try:
del old_data['_id']
del old_data['das']
del old_data['das_id']
del old_data['cache_id']
except:
pass
if row_data == old_data:
continue
if old_row:
yield old_row
old_row = row
yield row
def get_value(data, filters):
"""Filter data from a row for given list of filters"""
for ftr in filters:
if ftr.find('>') != -1 or ftr.find('<') != -1 or ftr.find('=') != -1:
continue
row = dict(data)
values = set()
for key in ftr.split('.'):
if isinstance(row, dict) and row.has_key(key):
if key == 'creation_time':
row = convert_time(row[key])
elif key == 'size':
row = size_format(row[key])
else:
row = row[key]
if isinstance(row, list):
for item in row:
if isinstance(item, dict) and item.has_key(key):
if key == 'creation_time':
row = convert_time(item[key])
elif key == 'size':
row = size_format(item[key])
else:
row = item[key]
values.add(row)
if len(values) == 1:
yield str(values.pop())
else:
yield str(list(values))
def fullpath(path):
"Expand path to full path"
if path and path[0] == '~':
path = path.replace('~', '')
path = path[1:] if path[0] == '/' else path
path = os.path.join(os.environ['HOME'], path)
return path
def get_data(host, query, idx, limit, debug, threshold=300, ckey=None, cert=None):
"""Contact DAS server and retrieve data for given DAS query"""
params = {'input':query, 'idx':idx, 'limit':limit}
path = '/das/cache'
pat = re.compile('http[s]{0,1}://')
if not pat.match(host):
msg = 'Invalid hostname: %s' % host
raise Exception(msg)
url = host + path
headers = {"Accept": "application/json"}
encoded_data = urllib.urlencode(params, doseq=True)
url += '?%s' % encoded_data
req = urllib2.Request(url=url, headers=headers)
if ckey and cert:
ckey = fullpath(ckey)
cert = fullpath(cert)
hdlr = HTTPSClientAuthHandler(ckey, cert, debug)
else:
hdlr = urllib2.HTTPHandler(debuglevel=debug)
opener = urllib2.build_opener(hdlr)
fdesc = opener.open(req)
data = fdesc.read()
fdesc.close()
pat = re.compile(r'^[a-z0-9]{32}')
if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
pid = data
else:
pid = None
sleep = 1 # initial waiting time in seconds
wtime = 30 # final waiting time in seconds
time0 = time.time()
while pid:
params.update({'pid':data})
encoded_data = urllib.urlencode(params, doseq=True)
url = host + path + '?%s' % encoded_data
req = urllib2.Request(url=url, headers=headers)
try:
fdesc = opener.open(req)
data = fdesc.read()
fdesc.close()
except urllib2.HTTPError as err:
return json.dumps({"status":"fail", "reason":str(err)})
if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
pid = data
else:
pid = None
time.sleep(sleep)
if sleep < wtime:
sleep *= 2
else:
sleep = wtime
if (time.time()-time0) > threshold:
reason = "client timeout after %s sec" % int(time.time()-time0)
return json.dumps({"status":"fail", "reason":reason})
return data
def prim_value(row):
"""Extract primary key value from DAS record"""
prim_key = row['das']['primary_key']
key, att = prim_key.split('.')
if isinstance(row[key], list):
for item in row[key]:
if item.has_key(att):
return item[att]
else:
return row[key][att]
def main():
"""Main function"""
optmgr = DASOptionParser()
opts, _ = optmgr.get_opt()
host = opts.host
debug = opts.verbose
query = opts.query
idx = opts.idx
limit = opts.limit
thr = opts.threshold
ckey = opts.ckey
cert = opts.cert
if not query:
raise Exception('You must provide input query')
data = get_data(host, query, idx, limit, debug, thr, ckey, cert)
if opts.format == 'plain':
jsondict = json.loads(data)
if not jsondict.has_key('status'):
print 'DAS record without status field:\n%s' % jsondict
return
if jsondict['status'] != 'ok':
print "status: %s reason: %s" \
% (jsondict.get('status'), jsondict.get('reason', 'N/A'))
return
nres = jsondict['nresults']
if not limit:
drange = '%s' % nres
else:
drange = '%s-%s out of %s' % (idx+1, idx+limit, nres)
if opts.limit:
msg = "\nShowing %s results" % drange
msg += ", for more results use --idx/--limit options\n"
print msg
mongo_query = jsondict['mongo_query']
unique = False
fdict = mongo_query.get('filters', {})
filters = fdict.get('grep', [])
aggregators = mongo_query.get('aggregators', [])
if 'unique' in fdict.keys():
unique = True
if filters and not aggregators:
data = jsondict['data']
if isinstance(data, dict):
rows = [r for r in get_value(data, filters)]
print ' '.join(rows)
elif isinstance(data, list):
if unique:
data = unique_filter(data)
for row in data:
rows = [r for r in get_value(row, filters)]
print ' '.join(rows)
else:
print jsondict
elif aggregators:
data = jsondict['data']
if unique:
data = unique_filter(data)
for row in data:
if row['key'].find('size') != -1 and \
row['function'] == 'sum':
val = size_format(row['result']['value'])
else:
val = row['result']['value']
print '%s(%s)=%s' \
% (row['function'], row['key'], val)
else:
data = jsondict['data']
if isinstance(data, list):
old = None
val = None
for row in data:
val = prim_value(row)
if not opts.limit:
if val != old:
print val
old = val
else:
print val
if val != old and not opts.limit:
print val
elif isinstance(data, dict):
print prim_value(data)
else:
print data
else:
print data
#
# main
#
if __name__ == '__main__':
main()
| [
"andrew.m.melo@vanderbilt.edu"
] | andrew.m.melo@vanderbilt.edu |
c06eb32013963d9155ed9bfbf49eab85864be127 | 48e361837c24ea3def1d8ddbe6191368a03ae50e | /python/sandbox/test.py | 96b9d29047d21934cb78fd127384349c64c9bc7f | [] | no_license | paglenn/random | 7f2134d3eeed7aebbd8f20e3df7299df58d99704 | 30e2a8522c303b1518960d7bf44220996e66c6ea | refs/heads/master | 2021-06-14T11:57:36.827255 | 2017-04-10T04:27:35 | 2017-04-10T04:27:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | # 2d self-avoiding random walk
#high rejection rate --> VERY SLOW...
import numpy as np
import matplotlib.pyplot as pp
import random
def SAW ( Nsteps):
goodPath = 0
while goodPath == 0:
X = [0 for i in range(Nsteps) ]
Y = [0 for i in range(Nsteps) ]
visited_sites = [(0,0) for i in range(Nsteps) ]
for step in range(1,Nsteps):
directions = [(0,1),(0,-1),(1,0),(-1,0)]
random_dir = random.choice(directions)
x = X[step-1] + random_dir[0]
y = Y[step-1] + random_dir[1]
if (x,y) in visited_sites:
goodPath = 0
break
else:
X[step] = x
Y[step] = y
visited_sites[step] = (x,y)
goodPath = 1
return visited_sites
import time
start_time = time.time()
for i in range(10000):J = SAW(10)
print("{0:e}".format(time.time()-start_time) )
| [
"nls.pglenn@gmail.com"
] | nls.pglenn@gmail.com |
7f082c545f267641e11c6aae37630408fd76f091 | ba88b66e61f0fd1ec0719b61568f0c883d02e534 | /inventory/urls.py | b2223228bde84132965119ec38b72aeb6a9e36c8 | [] | no_license | bnmng/spltcs | fbc9b5fb5342f5ee0a8bd080f957b4022509b3e9 | 5f19136d8a266b3d2094397cafe41b3ca1f45e78 | refs/heads/master | 2020-12-26T18:47:07.348996 | 2020-08-02T21:57:44 | 2020-08-02T21:57:44 | 237,602,374 | 0 | 0 | null | 2020-03-03T15:07:04 | 2020-02-01T11:07:46 | Python | UTF-8 | Python | false | false | 3,359 | py | from django.urls import path
from inventory.views import (CategoryAjaxMakeModels, CategoryAjaxSuccessMakeModels, CategoryCreate, CategoryDelete, CategoryDetail, CategoryList, CategoryUpdate, ItemAjaxEntity, ItemAjaxLocation, ItemAjaxMakeModel, ItemAjaxRole, ItemAjaxSuccessEntity, ItemAjaxSuccessLocation, ItemAjaxSuccessMakeModel, ItemAjaxSuccessRole, ItemCreate, ItemDelete, ItemDetail, ItemList, ItemUpdate, MakeModelCreate, MakeModelDelete, MakeModelDetail, MakeModelList, MakeModelUpdate, RoleAjaxItems, RoleAjaxSuccessItems, RoleCreate, RoleDelete, RoleDetail, RoleList, RoleUpdate)
urlpatterns = [
path('', ItemList.as_view(), name='inventory'),
path('items', ItemList.as_view(), name='item_list'),
path('create', ItemCreate.as_view(), name='item_create'),
path('<int:pk>', ItemDetail.as_view(), name='item_detail'),
path('<int:pk>/update', ItemUpdate.as_view(), name='item_update'),
path('<int:pk>/delete', ItemDelete.as_view(), name='item_delete'),
path('item_ajax_location', ItemAjaxLocation.as_view(), name='item_ajax_location'),
path('item_ajax_location/<int:pk>', ItemAjaxSuccessLocation.as_view(), name='item_ajaxsuccess_location'),
path('item_ajax_makemodel', ItemAjaxMakeModel.as_view(), name='item_ajax_makemodel'),
path('item_ajax_makemodel/<int:pk>', ItemAjaxSuccessMakeModel.as_view(), name='item_ajaxsuccess_makemodel'),
path('item_ajax_role', ItemAjaxRole.as_view(), name='item_ajax_role'),
path('item_ajax_role/<int:pk>', ItemAjaxSuccessRole.as_view(), name='item_ajaxsuccess_role'),
path('item_ajax_entity', ItemAjaxEntity.as_view(), name='item_ajax_entity'),
path('item_ajax_entity/<int:pk>', ItemAjaxSuccessEntity.as_view(), name='item_ajaxsuccess_entity'),
path('categories', CategoryList.as_view(), name='category_list'),
path('category/create', CategoryCreate.as_view(), name='category_create'),
path('category/<int:pk>', CategoryDetail.as_view(), name='category_detail'),
path('category/<int:pk>/update', CategoryUpdate.as_view(), name='category_update'),
path('category/<int:pk>/delete', CategoryDelete.as_view(), name='category_delete'),
path('category_ajax_makemodels', CategoryAjaxMakeModels.as_view(), name='category_ajax_makemodels'),
path('category_ajax_makemodels/<int:pk>', CategoryAjaxSuccessMakeModels.as_view(), name='category_ajaxsuccess_makemodels'),
path('roles', RoleList.as_view(), name='role_list'),
path('role/create', RoleCreate.as_view(), name='role_create'),
path('role/<int:pk>', RoleDetail.as_view(), name='role_detail'),
path('role/<int:pk>/update', RoleUpdate.as_view(), name='role_update'),
path('role/<int:pk>/delete', RoleDelete.as_view(), name='role_delete'),
path('role_ajax_items', RoleAjaxItems.as_view(), name='role_ajax_items'),
path('role_ajax_items/<int:pk>', RoleAjaxSuccessItems.as_view(), name='role_ajaxsuccess_items'),
path('makemodels', MakeModelList.as_view(), name='makemodel_list'),
path('makemodel/create', MakeModelCreate.as_view(), name='makemodel_create'),
path('makemodel/<int:pk>', MakeModelDetail.as_view(), name='makemodel_detail'),
path('makemodel/<int:pk>/update', MakeModelUpdate.as_view(), name='makemodel_update'),
path('makemodel/<int:pk>/delete', MakeModelDelete.as_view(), name='makemodel_delete'),
]
# vim: ai ts=4 sts=4 et sw=4
| [
"benjamin@bnmng.com"
] | benjamin@bnmng.com |
3fad5952bb69e1a9749fdb8c1ede20354824110b | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_create_project_option.py | d91bccd26bce8e2e0e7bc236f8a4a808d20d9140 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,199 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class KeystoneCreateProjectOption:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str',
'parent_id': 'str',
'domain_id': 'str',
'description': 'str'
}
attribute_map = {
'name': 'name',
'parent_id': 'parent_id',
'domain_id': 'domain_id',
'description': 'description'
}
def __init__(self, name=None, parent_id=None, domain_id=None, description=None):
"""KeystoneCreateProjectOption - a model defined in huaweicloud sdk"""
self._name = None
self._parent_id = None
self._domain_id = None
self._description = None
self.discriminator = None
self.name = name
self.parent_id = parent_id
if domain_id is not None:
self.domain_id = domain_id
if description is not None:
self.description = description
@property
def name(self):
"""Gets the name of this KeystoneCreateProjectOption.
项目名称。必须以存在的\"区域ID_\"开头,长度小于等于64字符。例如区域“华北-北京一”的区域ID为“cn-north-1”,在其下创建项目时,项目名应填“cn-north-1_IAMProject”
:return: The name of this KeystoneCreateProjectOption.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this KeystoneCreateProjectOption.
项目名称。必须以存在的\"区域ID_\"开头,长度小于等于64字符。例如区域“华北-北京一”的区域ID为“cn-north-1”,在其下创建项目时,项目名应填“cn-north-1_IAMProject”
:param name: The name of this KeystoneCreateProjectOption.
:type: str
"""
self._name = name
@property
def parent_id(self):
"""Gets the parent_id of this KeystoneCreateProjectOption.
区域对应的项目ID,例如区域“华北-北京一”区域对应的项目ID为:04dd42abe48026ad2fa3c01ad7fa.....,获取方式请参见:[获取账号、IAM用户、项目、用户组、委托的名称和ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html)。
:return: The parent_id of this KeystoneCreateProjectOption.
:rtype: str
"""
return self._parent_id
@parent_id.setter
def parent_id(self, parent_id):
"""Sets the parent_id of this KeystoneCreateProjectOption.
区域对应的项目ID,例如区域“华北-北京一”区域对应的项目ID为:04dd42abe48026ad2fa3c01ad7fa.....,获取方式请参见:[获取账号、IAM用户、项目、用户组、委托的名称和ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html)。
:param parent_id: The parent_id of this KeystoneCreateProjectOption.
:type: str
"""
self._parent_id = parent_id
@property
def domain_id(self):
"""Gets the domain_id of this KeystoneCreateProjectOption.
项目所属账号ID。
:return: The domain_id of this KeystoneCreateProjectOption.
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""Sets the domain_id of this KeystoneCreateProjectOption.
项目所属账号ID。
:param domain_id: The domain_id of this KeystoneCreateProjectOption.
:type: str
"""
self._domain_id = domain_id
@property
def description(self):
"""Gets the description of this KeystoneCreateProjectOption.
项目描述信息,长度小于等于255字符。
:return: The description of this KeystoneCreateProjectOption.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this KeystoneCreateProjectOption.
项目描述信息,长度小于等于255字符。
:param description: The description of this KeystoneCreateProjectOption.
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeystoneCreateProjectOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
955c42544a26f4c5add16a7771bd1becf290236c | 3e6bf1ba30707aacc6e16a84d93e449fcd4a32b7 | /joins/migrations/0009_auto_20160707_0613.py | 652d8b1db03734245d5f3a38d1df06aa86bf6df3 | [] | no_license | osbornetunde/Lauch | a4cddea55b1af341d1d894fc7635c17cddab5707 | fe05c1974ae06919b49c607e96b387a4da602bfa | refs/heads/master | 2021-01-19T05:29:55.148350 | 2016-07-20T06:30:11 | 2016-07-20T06:30:11 | 63,755,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-07 05:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('joins', '0008_joinfriends'),
]
operations = [
migrations.RenameModel(
old_name='JoinFriends',
new_name='JoinFriend',
),
]
| [
"osbornetunde@gmail.com"
] | osbornetunde@gmail.com |
a0ff784d59a53f6917122e881a2dd0959023182f | 9fdff458f1e20321aaa70d4669eeacb9423c9f36 | /multi/train/AccuracyFormula.py | 7976115db2ad65189a9c6e7d0501deddc61daed7 | [] | no_license | leemengwei/GNRX | 11639716f220f4721e521998ff282ee40ca50275 | 80c5b78826187a519588af3d8c71fb40ba7b94fe | refs/heads/main | 2023-04-01T00:34:12.638628 | 2021-03-29T05:02:03 | 2021-03-29T05:02:03 | 352,524,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,184 | py | # -*- coding: UTF-8 -*-
import numpy as np
import math
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
def RMSE(values_1, values_2, baseNum) -> float:
    """Root-mean-square error of two equal-length series, normalized by baseNum.

    baseNum is typically the installed capacity, so the result is a
    dimensionless error fraction.
    """
    if len(values_1) == 0 or len(values_2) == 0 or len(values_1) != len(values_2):
        raise BaseException("数组无值或长度不一致")
    # Normalized point-wise differences, squared and averaged, then rooted.
    diff = (np.asarray(values_1) - np.asarray(values_2)) / baseNum
    mean_square = np.power(diff, 2).sum() / len(diff)
    return math.sqrt(mean_square)
def MAE(values_1, values_2, baseNum) -> float:
    """Mean absolute error of two equal-length series, normalized by baseNum.

    baseNum is typically the installed capacity, so the result is a
    dimensionless error fraction.
    """
    if len(values_1) == 0 or len(values_2) == 0 or len(values_1) != len(values_2):
        raise BaseException("数组无值或长度不一致")
    diff = np.asarray(values_1) - np.asarray(values_2)
    return np.abs(diff / baseNum).sum() / len(diff)
def CORR(values_1, values_2) -> float:
    """Pearson correlation coefficient of two equal-length series.

    Returns 0 when either series is constant (zero denominator).
    """
    if len(values_1) == 0 or len(values_2) == 0 or len(values_1) != len(values_2):
        raise BaseException("数组无值或长度不一致")
    arr1 = np.asarray(values_1)
    arr2 = np.asarray(values_2)
    dev1 = arr1 - arr1.mean()
    dev2 = arr2 - arr2.mean()
    numerator = (dev1 * dev2).sum() / len(arr1)
    denominator = math.sqrt((np.power(dev1, 2).sum() / len(arr1)) *
                            (np.power(dev2, 2).sum() / len(arr2)))
    if denominator == 0:
        # Degenerate case: at least one series has no variance.
        return 0
    return numerator / denominator
def HMA(P, FP) -> float:
    """Harmonic-mean style accuracy of forecast FP against actual P.

    Negative values in either series are clipped to 0 before scoring.
    The score weights each point's relative imbalance a = |P/(P+FP) - 0.5|
    by its share c of the total absolute deviation d, returning
    1 - 2 * sum(a * c); a perfect forecast scores 1.0.

    Bug fix: previously, identical series after clipping gave d == 0 and
    the division by d produced NaN instead of the correct perfect score.
    """
    Ps = np.array(P, dtype=float)
    FPs = np.array(FP, dtype=float)
    Ps[Ps < 0] = 0
    FPs[FPs < 0] = 0
    d = abs(Ps - FPs).sum()
    if d == 0:
        # Zero total deviation means the forecast matches exactly.
        return 1.0
    a = abs(Ps / (Ps + FPs) - 0.5)
    c = abs(Ps - FPs) / d
    return 1 - 2 * (a * c).sum()
def PD(values_1, values_2) -> float:
    """Mean deviation ratio |v1 - v2| / v2, each point capped at 1."""
    if len(values_1) == 0 or len(values_2) == 0 or len(values_1) != len(values_2):
        raise BaseException("数组无值或长度不一致")
    arr1 = np.asarray(values_1)
    arr2 = np.asarray(values_2)
    # Relative deviation per point, saturated at 100%.
    capped = np.minimum(np.abs(arr1 - arr2) / arr2, 1.0)
    return np.average(capped)
def CalcCorr_byDate(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str) -> dict:
    """Per-calendar-day correlation between actual (titleP) and forecast (titleFP).

    Returns a dict mapping each date to its CORR value.
    """
    frame = data[[titleTime, titleP, titleFP]].copy()
    frame["Date"] = frame[titleTime].apply(lambda t: t.date())
    return {day: CORR(rows[titleP], rows[titleFP])
            for day, rows in frame.groupby("Date")}
def CalcMAE_byDate_exclude3(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float) -> dict:
    """Per-day accuracy 1 - MAE, excluding points whose actual power is
    below 3% of installed capacity (baseNum).

    Days with no remaining points are omitted from the result.
    """
    frame = data[[titleTime, titleP, titleFP]].copy()
    frame["Date"] = frame[titleTime].apply(lambda t: t.date())
    cutoff = baseNum * 0.03
    result = {}
    for day, rows in frame.groupby("Date"):
        kept = rows[rows[titleP] >= cutoff]
        if len(kept) == 0:
            continue
        result[day] = 1 - MAE(kept[titleP], kept[titleFP], baseNum)
    return result
def CalcRMSE_byDate(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float) -> dict:
    """Per-day accuracy 1 - RMSE between actual and forecast power.

    Days containing a single sample are skipped.
    """
    frame = data[[titleTime, titleP, titleFP]].copy()
    frame["Date"] = frame[titleTime].apply(lambda t: t.date())
    result = {}
    for day, rows in frame.groupby("Date"):
        if len(rows) == 1:
            continue
        result[day] = 1 - RMSE(rows[titleP], rows[titleFP], baseNum)
    return result
def CalcKouDian_RMSE_byDate(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float, threshold:float) -> dict:
    """Per-day penalty percentage based on the 1 - RMSE accuracy.

    The penalty is the shortfall below `threshold` (0 when the day's
    accuracy meets or exceeds it). Single-sample days are skipped.
    """
    frame = data[[titleTime, titleP, titleFP]].copy()
    frame["Date"] = frame[titleTime].apply(lambda t: t.date())
    result = {}
    for day, rows in frame.groupby("Date"):
        if len(rows) == 1:
            continue
        accuracy = 1 - RMSE(rows[titleP], rows[titleFP], baseNum)
        result[day] = (threshold - accuracy) if accuracy < threshold else 0
    return result
def CalcKouDian_MAE_byDate(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float, threshold:float) -> dict:
    """Per-day penalty percentage based on the 1 - MAE accuracy.

    Points with actual power below 3% of capacity are excluded first;
    days left with one point or fewer are skipped. The penalty is the
    shortfall below `threshold` (0 when accuracy meets it).
    """
    frame = data[[titleTime, titleP, titleFP]].copy()
    frame["Date"] = frame[titleTime].apply(lambda t: t.date())
    cutoff = baseNum * 0.03
    result = {}
    for day, rows in frame.groupby("Date"):
        kept = rows[rows[titleP] >= cutoff]
        if len(kept) <= 1:
            continue
        accuracy = 1 - MAE(kept[titleP], kept[titleFP], baseNum)
        result[day] = (threshold - accuracy) if accuracy < threshold else 0
    return result
def CalcTheroyMAE_byDate(data:pd.DataFrame, titleTime:str, titleP:str, title_Radia:str, title_Example:str, baseNum:float) -> dict:
    """Per-day theoretical-power accuracy and penalty scores.

    For each date, compares the (negative-clipped) actual energy sum
    against two theoretical-power sums (radiation-based and sample-based).
    Returns date -> (accRadia, accExample, scoreRadia, scoreExample),
    where a score is 0 for accuracy >= 0.97, otherwise the capacity-scaled
    shortfall. Days whose actual sum is <= 0 are skipped.
    """
    frame = data[[titleTime, titleP, title_Radia, title_Example]].copy()
    frame["Date"] = frame[titleTime].apply(lambda t: t.date())
    result = {}
    for day, rows in frame.groupby("Date"):
        sum_actual = rows[titleP].clip(lower=0).sum()
        if sum_actual <= 0:
            continue
        sum_radia = rows[title_Radia].sum()
        sum_example = rows[title_Example].sum()

        def _accuracy(theory_sum):
            # 1 - relative gap, floored at 0 when the gap exceeds the actual sum.
            gap = math.fabs(sum_actual - theory_sum)
            return 1 - gap / sum_actual if gap <= sum_actual else 0

        def _score(acc):
            # Capacity-scaled penalty below the 0.97 compliance line.
            if acc >= 0.97:
                return 0
            if acc > 0:
                return (0.97 - acc) * 100 * 0.05 / 10 * baseNum
            return 0.97 * 100 * 0.05 / 10 * baseNum

        acc_radia = _accuracy(sum_radia)
        acc_example = _accuracy(sum_example)
        result[day] = (acc_radia, acc_example, _score(acc_radia), _score(acc_example))
    return result
def CalcHMA_byDate_exclude3(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float) -> dict:
    """Per-day harmonic-mean accuracy (HMA).

    Keeps only points where actual OR forecast power exceeds 3% of
    capacity (baseNum); days with no remaining points are omitted.
    """
    frame = data[[titleTime, titleP, titleFP]].copy()
    frame["Date"] = frame[titleTime].apply(lambda t: t.date())
    cutoff = baseNum * 0.03
    result = {}
    for day, rows in frame.groupby("Date"):
        kept = rows[(rows[titleP] > cutoff) | (rows[titleFP] > cutoff)]
        if len(kept) == 0:
            continue
        result[day] = HMA(kept[titleP], kept[titleFP])
    return result
def CalcDIP_byDate_backup(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float, threshold=0.25) -> dict:
    """Per-day deviation-integral energy, penalizing points whose relative
    deviation |P - FP| / FP exceeds `threshold`.

    Special cases when one side is zero: deviation is 0 if the other side
    is within 3% of capacity, otherwise 1. The day's penalty is
    sum(FP * (deviation - threshold)) over the offending points.
    """
    frame = data[[titleTime, titleP, titleFP]].copy()
    frame["Date"] = frame[titleTime].apply(lambda t: t.date())
    small = baseNum * 0.03
    result = {}
    for day, rows in frame.groupby("Date"):
        actual = np.array(rows[titleP], dtype=float)
        forecast = np.array(rows[titleFP], dtype=float)
        actual[actual < 0] = 0
        forecast[forecast < 0] = 0
        deviation = np.abs((actual - forecast) / forecast)
        # Overwrite the divide-by-zero / degenerate entries explicitly.
        deviation[(forecast == 0) & (actual <= small)] = 0
        deviation[(forecast == 0) & (actual > small)] = 1
        deviation[(actual == 0) & (forecast <= small)] = 0
        deviation[(actual == 0) & (forecast > small)] = 1
        over = deviation > threshold
        if not over.any():
            result[day] = 0
        else:
            result[day] = (forecast[over] * (deviation[over] - threshold)).sum()
    return result
def CalcDIP_byDate(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float, threshold=0.25) -> dict:
"""计算偏差积分电量(按指定达标线计算),处理预测为 0 时结果为 0 的情况"""
dataSet = data[[titleTime,titleP,titleFP]].copy()
dataSet["Date"] = dataSet[titleTime].apply(lambda t : t.date())
dateGroup = dataSet.groupby("Date")
result = dict()
for key, group in dateGroup:
Ps = np.array(group[titleP])
FPs = np.array(group[titleFP])
Ps[Ps < 0] = 0
FPs[FPs < 0] = 0
DS = abs((Ps - FPs) / FPs)
DS[(FPs==0) & (Ps<=baseNum*0.03)] = 0
DS[(FPs==0) & (Ps>baseNum*0.03)] = 1
DS[(Ps==0) & (FPs<=baseNum*0.03)] = 0
DS[(Ps==0) & (FPs>baseNum*0.03)] = 1
overload = (DS > threshold)
if True not in (overload):
result[key] = 0
else:
group = group[overload]
DP = group.apply(lambda row : row[titleP]-row[titleFP]*1.2 if row[titleP]>=row[titleFP] else row[titleFP]*0.8-row[titleP], axis=1)
result[key] = DP.sum()
return result
def CalcDIP_byDate_3(data: pd.DataFrame, titleTime: str, titleP: str, titleFP: str, baseNum: float, threshold=0.25) -> dict:
"""计算偏差积分电量(按指定达标线计算)"""
dataSet = data[[titleTime, titleP, titleFP]].copy()
dataSet["Date"] = dataSet[titleTime].apply(lambda t: t.date())
dateGroup = dataSet.groupby("Date")
result = dict()
for key, group in dateGroup:
group[titleP] = group[titleP].apply(lambda a: a if a >= 0 else 0)
group[titleFP] = group[titleFP].apply(lambda a: a if a >= 0 else 0)
Ps = np.array(group[titleP])
FPs = np.array(group[titleFP])
DS = abs((Ps - FPs) / FPs)
DS[(FPs == 0) & (Ps <= baseNum * 0.03)] = 0
DS[(FPs == 0) & (Ps > baseNum * 0.03)] = 1
DS[(Ps == 0) & (FPs <= baseNum * 0.03)] = 0
DS[(Ps == 0) & (FPs > baseNum * 0.03)] = 1
overload = (DS > threshold)
overload_1 = (DS > threshold) & ~ (((FPs == 0) & (Ps > baseNum * 0.03)) | ((Ps == 0) & (FPs > baseNum * 0.03)))
overload_2 = (DS > threshold) & (((FPs == 0) & (Ps > baseNum * 0.03)) | ((Ps == 0) & (FPs > baseNum * 0.03)))
if True not in (overload):
result[key] = 0
else:
group_1 = group[overload_1]
group_2 = group[overload_2]
DP_1 = group_1.apply(lambda row: row["P"] - row["F_P"] * 1.2 if row["P"] >= row["F_P"] else row["F_P"] * 0.8 - row["P"],axis=1)
DP_2 = group_2.apply(lambda row: row["P"] - row["F_P"] * 1.0 if row["P"] >= row["F_P"] else row["F_P"] * 1.0 - row["P"],axis=1)
result[key] = DP_1.sum() if len(DP_1)>0 else 0 + DP_2.sum() if len(DP_2)>0 else 0
return result
def CalcPPR_byDate(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float, threshold=0.8) -> dict:
"""计算合格率"""
dataSet = data[[titleTime,titleP,titleFP]].copy()
dataSet["Date"] = dataSet[titleTime].apply(lambda t : t.date())
dateGroup = dataSet.groupby("Date")
result = dict()
for key, group in dateGroup:
if len(group)==1:
continue
Ps = np.array(group[titleP])
FPs = np.array(group[titleFP])
Ps[Ps < 0] = 0
FPs[FPs < 0] = 0
DS = abs((Ps - FPs) / baseNum)
pp = DS[DS<=(1-threshold)]
ppRatio = len(pp) / len(group)
result[key] = ppRatio
return result
def CalcRMSE_byMonth(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float) -> dict:
"""按整月计算均方根精度"""
dataSet = data[[titleTime,titleP,titleFP]].copy()
# dataSet[titleFP] = dataSet[titleFP].apply(lambda p : p if p>=0 else 0)
dataSet["Month"] = dataSet[titleTime].apply(lambda t : t.strftime("%Y-%m"))
dateGroup = dataSet.groupby("Month")
result = dict()
for key, group in dateGroup:
if len(group) == 1:
continue
result[key] = 1-RMSE(group[titleP], group[titleFP], baseNum)
return result
def CalcCORR_byDate(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str) -> dict:
"""计算日相关性"""
dataSet = data[[titleTime,titleP,titleFP]].copy()
dataSet["Date"] = dataSet[titleTime].apply(lambda t : t.date())
dateGroup = dataSet.groupby("Date")
result = dict()
for key, group in dateGroup:
result[key] = CORR(group[titleP], group[titleFP])
return result
def CalcDIP_Powertrading(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float) -> (dict, pd.DataFrame):
"""计算偏差积分电量(甘肃电力交易)"""
dataSet = data[[titleTime,titleP,titleFP]].copy()
dataSet["Date"] = dataSet[titleTime].apply(lambda t : t.date())
dateGroup = dataSet.groupby("Date")
result = dict()
dipFrame=pd.DataFrame()
for key, group in dateGroup:
Ps = np.array(group[titleP])
FPs = np.array(group[titleFP])
Ps[Ps < 0] = 0
FPs[FPs < 0] = 0
# FPs[(FPs*0.02 < 2)] = 2
# FPs[(FPs*0.02 >= 2)&(FPs*0.05 < 5)] = 5
# FPs[(FPs*0.05 >= 5)&(FPs*0.1 < 10)] = 10
# FPs[(FPs*0.1 >= 10)&(FPs*0.2 < 20)] = 20
# FPs[(FPs*0.02 < 2)] = 2
# FPs[FPs*0.05 < 5] = 5
# FPs[FPs*0.1 < 10] = 10
# FPs[FPs*0.2 < 20] = 20
dips = Ps - FPs
coff = np.zeros(len(dips))
coff[(-0.1*FPs<=dips)&(dips<=0.02*FPs)] = 0.0
coff[((-0.2*FPs<=dips)&(dips<-0.1*FPs))|((0.02*FPs<dips)&(dips<=0.05*FPs))] = 1.0
coff[(dips<-0.2*FPs)|(0.05*FPs<dips)] = 3.0
dips_after = np.abs(dips*coff)
result[key] = dips_after.sum()
group["DIP"] = np.abs(dips)
group["DIP_After"] = dips_after
dipFrame = dipFrame.append(group.copy(),ignore_index=True)
return result, dipFrame
def CalcDIP_Powertrading2(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float) -> (dict, pd.DataFrame):
"""计算预测总考核(甘肃电力交易),把预测当作计划值,所有点均考核"""
dataSet = data[[titleTime,titleP,titleFP]].copy()
dataSet["Date"] = dataSet[titleTime].apply(lambda t : t.date())
dateGroup = dataSet.groupby("Date")
piancha_down = 0.15
piancha_up = 0.05
result = dict()
dipFrame=pd.DataFrame()
for key, group in dateGroup:
group["Error"] = (group[titleP]/group[titleFP]).replace(np.inf,1) -1
group["ErrorPower"] = group.apply(lambda x: x[titleFP]*(x["Error"] - piancha_up) if x["Error"] >= piancha_up \
else (x[titleFP] * (x["Error"]+piancha_down) if x["Error"] <= 0 - piancha_down else 0), axis=1)
group.loc[((group[titleFP] * piancha_up < 5) & (group["Error"] > piancha_up)), "ErrorPower"] = 0
group.loc[((group[titleFP] * piancha_down < 5) & (group["Error"] < 0 - piancha_down)), "ErrorPower"] = 0
group["ErrorPower"] = abs(group["ErrorPower"])
result[key] = group["ErrorPower"].sum()
dipFrame = dipFrame.append(group.copy(),ignore_index=True)
return result, dipFrame
def CalcDIP_Powertrading4(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float) -> (dict, pd.DataFrame):
"""计算预测总考核(甘肃电力交易),把预测当作计划值,所有点均考核"""
dataSet = data[[titleTime,titleP,titleFP]].copy()
dataSet["Date"] = dataSet[titleTime].apply(lambda t : t.date())
dateGroup = dataSet.groupby("Date")
piancha_down = 0.15
piancha_up = 0.05
result = dict()
dipFrame=pd.DataFrame()
for key, group in dateGroup:
group["Error"] = (group[titleP]/group[titleFP]).replace(np.inf,1) -1
group["ErrorPower"] = group.apply(lambda x: x[titleFP]*(x["Error"] - piancha_up) if x["Error"] >= piancha_up \
else (x[titleFP] * (x["Error"]+piancha_down) if x["Error"] <= 0 - piancha_down else 0), axis=1)
index_up = group[((group[titleFP] * piancha_up < 5) & (group["Error"] > piancha_up))].index.tolist()
group.loc[index_up, "ErrorPower"] = group.loc[index_up, titleP] - group.loc[index_up, titleFP] - 5
group.loc[index_up, "ErrorPower"] = group.loc[index_up, "ErrorPower"].apply(lambda x: 0 if x<0 else x)
index_down = group[((group[titleFP] * piancha_down < 5) & (group["Error"] < 0 - piancha_down))].index.tolist()
group.loc[index_down, "ErrorPower"] = group.loc[index_down, titleP] - group.loc[index_down, titleFP] + 5
group.loc[index_down, "ErrorPower"] = group.loc[index_down, "ErrorPower"].apply(lambda x: 0 if x>0 else x)
group["ErrorPower"] = abs(group["ErrorPower"])
result[key] = group["ErrorPower"].sum()
dipFrame = dipFrame.append(group.copy(),ignore_index=True)
return result, dipFrame
def CalcPD_byDate_exclude3(data:pd.DataFrame, titleTime:str, titleP:str, titleFP:str, baseNum:float) -> dict:
"""偏差率 |预测-实发|/实发"""
dataSet = data[[titleTime,titleP,titleFP]].copy()
dataSet["Date"] = dataSet[titleTime].apply(lambda t : t.date())
dataSet = dataSet[dataSet[titleP]>=baseNum*0.03]
result = dict()
for key, group in dataSet.groupby("Date"):
result[key] = PD(group[titleFP], group[titleP])
return result
#if __name__=="__main__":
# import numpy as np
# import pandas as pd
# from com.common.utils.FileUtils import ReadXLSX, ReadCSV
#
# df = ReadCSV(r"D:\数据整理\气象数据\QHGDTNMH.csv", timecolumns=["Time"], timeformats=["%Y-%m-%d %H:%M:%S"])
# # df = ReadCSV(r"D:\FTP\1.csv", timecolumns=["Time"], timeformats=["%Y-%m-%d %H:%M:%S"])
# df = df[["Time", "P", "F_P"]]
# df.dropna(axis=0, how="any", inplace=True)
# accFP = CalcDIP_byDate(df, "Time", "P", "F_P", 49.5, 0.25)
#
# for key in accFP:
# print(key, accFP.get(key))
| [
"1099366685@qq.com"
] | 1099366685@qq.com |
c9bf084657308a4de0a109a873e9eae451c3a16a | c5388342c19c5605f2113f327c1023ee74eb7144 | /03-Multidimensional-Lists/Exercise/07_bombs.py | e885932235d36fd249041f58b88425c2d2cabcc7 | [] | no_license | zhyordanova/Python-Advanced | 8b0cd3f31c12e726b7846d54c6ee7bfb602a07a9 | ae2a9416e89eae6c40ae965a3ad65af54b36e333 | refs/heads/main | 2023-05-04T03:17:46.883246 | 2021-05-24T22:49:31 | 2021-05-24T22:49:31 | 349,577,494 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | def is_valid(row, col, size):
return 0 <= row < size and 0 <= col < size
def explode(row, col, size, matrix):
bomb = matrix[row][col]
for r in range(row - 1, row + 2):
for c in range(col - 1, col + 2):
if is_valid(r, c, size) and matrix[r][c] > 0:
matrix[r][c] -= bomb
n = int(input())
def init_matrix(n):
matrix = []
for _ in range(n):
matrix.append([int(el) for el in input().split()])
return matrix
def print_result(matrix):
for row in matrix:
print(' '.join([str(x) for x in row]))
matrix = init_matrix(n)
bomb_coordinates = input().split()
for bomb in bomb_coordinates:
tokens = [int(el) for el in bomb.split(',')]
bomb_row = tokens[0]
bomb_col = tokens[1]
if matrix[bomb_row][bomb_col] > 0:
explode(bomb_row, bomb_col, n, matrix)
alive_cells_count = 0
alive_cells_sum = 0
for row in range(n):
for col in range(n):
cell = matrix[row][col]
if cell > 0:
alive_cells_count += 1
alive_cells_sum += cell
print(f"Alive cells: {alive_cells_count}")
print(f"Sum: {alive_cells_sum}")
print_result(matrix)
| [
"zhivka.yordanova@mentormate.com"
] | zhivka.yordanova@mentormate.com |
4b89e9080072b2bfbc0253d4e18bb00206bf0592 | 320fd3b851241272e6d4a3ae2fce29dc4f28d343 | /backend/facerecognitionwo_18158/settings.py | c53588696a3933e82784abfa85a7cf06a649ece3 | [] | no_license | crowdbotics-apps/facerecognitionwo-18158 | c180ab923ba3cb0018106d9654ff15daef4a9f60 | f6a18def7c4b354754885c600e93bb7abac7b637 | refs/heads/master | 2022-11-08T19:32:04.083969 | 2020-06-17T03:08:56 | 2020-06-17T03:08:56 | 272,865,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,821 | py | """
Django settings for facerecognitionwo_18158 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'facerecognitionwo_18158.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'facerecognitionwo_18158.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
28e202096359fd0179a1f7606d56f27fc59698ba | a2930fe93a3078c444651974a02b426b637bb9ed | /thoipapy/feature_importance/mean_decrease_accuracy.py | 4cb31c2a5c27d4978e793186008bef68b972f1ca | [
"MIT"
] | permissive | bojigu/thoipapy | 5ee03e7a2fb8c62b9831f12136b33f933e2d1176 | cc571677bd7e25e73db07af9d5d3c2cc36682903 | refs/heads/develop | 2022-12-23T05:25:14.699988 | 2021-09-07T13:39:36 | 2021-09-07T13:39:36 | 128,191,009 | 2 | 0 | MIT | 2022-11-22T07:00:15 | 2018-04-05T10:26:48 | Python | UTF-8 | Python | false | false | 12,539 | py | import os
import sys
import time
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import thoipapy.utils
from thoipapy import utils
from thoipapy.validation.auc import calc_PRAUC_ROCAUC_using_10F_validation
from thoipapy.ML_model.train_model import return_classifier_with_loaded_ensemble_parameters
from thoipapy.validation.bocurve import calc_best_overlap_from_selected_column_in_df, calc_best_overlap, parse_BO_data_csv_to_excel
from thoipapy.validation.leave_one_out import get_clusters_putative_homologues_in_protein_set
def calc_feat_import_from_mean_decrease_accuracy(s, logging):
"""Calculate feature importances using mean decrease in accuracy.
This method differs from calc_feat_import_from_mean_decrease_impurity.
It's much slower, and involves the use of 10-fold cross-validation for each variable separately.
- a feature (or group of features) is selected for randomisation
- in theory, randomising important features will cause a drop in prediction accuracy
- The feature (or group of features) is shuffled
- precision-recall AUC and ROC-AUC is measured
- the difference between the original AUC and the AUC with shuffled variable is measured
- higher values suggest more important features
Parameters
----------
s : dict
Settings dictionary
logging : logging.Logger
Python object with settings for logging to console and file.
Saved Files
-----------
feat_imp_MDA_xlsx : xlsx
Comma separated values, showing decrease in AUC for each feature or group of features.
"""
logging.info('------------ starting calc_feat_import_from_mean_decrease_accuracy ------------')
# input
train_data_csv = Path(s["data_dir"]) / f"results/{s['setname']}/train_data/03_train_data_after_first_feature_seln.csv"
tuned_ensemble_parameters_csv = Path(s["data_dir"]) / f"results/{s['setname']}/train_data/04_tuned_ensemble_parameters.csv"
# output
feat_imp_MDA_xlsx = os.path.join(s["data_dir"], "results", s["setname"], "feat_imp", "feat_imp_mean_decrease_accuracy.xlsx")
feat_imp_temp_THOIPA_BO_curve_data_csv = Path(s["data_dir"]) / f"results/{s['setname']}/feat_imp/feat_imp_temp_THOIPA.best_overlap_data.csv"
feat_imp_temp_bocurve_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/feat_imp/feat_imp_temp_bocurve_data.xlsx"
thoipapy.utils.make_sure_path_exists(feat_imp_MDA_xlsx, isfile=True)
df_data = pd.read_csv(train_data_csv, index_col=0)
if df_data.isnull().values.any():
for col in df_data.columns:
if df_data[col].isnull().values.any():
logging.warning(f"{col} contains nan values")
raise Exception("df_data contains nan values. Please check names of features_to_be_retained_during_selection in settings file.")
# drop training data (full protein) that don't have enough homologues
if s["min_n_homol_training"] != 0:
df_data = df_data.loc[df_data.n_homologues >= s["min_n_homol_training"]]
cols_excluding_y = [c for c in df_data.columns if c != s['bind_column']]
X = df_data[cols_excluding_y]
y = df_data["interface"]
settings_path = s["settings_path"]
df_feat = pd.read_excel(settings_path, sheet_name="features")
df_feat = df_feat.loc[df_feat.include == 1]
feature_types: list = list(df_feat.feature_type.unique())
## DEPRECATED HARD-CODED LIST: use feature_type in settings file, instead
# polarity_features = ["test_dropping_of_features_not_included", "polarity", "relative_polarity", "polarity4mean", "polarity3Nmean", "polarity3Cmean", "polarity1mean"]
# pssm_features = ["A", "C", "D", "E", "F", "G", "H", "I", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "V", "W", "Y", "CS", "DE", "KR", "QN", "LIV"]
# coev_features = ["DImax", "MImax", "DItop4mean", "MItop4mean", "DItop8mean", "MItop8mean", "DI4max", "MI4max", "DI1mean", "MI1mean", "DI3mean", "MI3mean", "DI4mean", "MI4mean", "DI4cum"]
# DI_features = ["DImax", "DItop4mean", "DItop8mean", "DI4max", "DI1mean", "DI3mean", "DI4mean", "DI4cum"]
# MI_features = ["MImax", "MItop4mean", "MItop8mean", "MI4max", "MI1mean", "MI3mean", "MI4mean"]
# cons_features = ["entropy", "cons4mean", "conservation"]
# motif_features = ["GxxxG", "SmxxxSm"]
# physical_features = ["branched", "mass"]
# TMD_features = ["residue_depth", "n_TMDs", "n_homologues"]
# polarity_and_pssm_features = polarity_features + pssm_features
# features_nested_list = [polarity_and_pssm_features, coev_features, DI_features, MI_features, cons_features, motif_features, physical_features, TMD_features]
# features_nested_namelist = ["polarity_and_pssm_features", "coev_features", "DI_features", "MI_features", "cons_features", "motif_features", "physical_features", "TMD_features"]
# for i in range(len(features_nested_list)):
# sys.stdout.write("\n{} : {}".format(features_nested_namelist[i], features_nested_list[i]))
forest = return_classifier_with_loaded_ensemble_parameters(s, tuned_ensemble_parameters_csv)
pr_auc_orig, roc_auc_orig = calc_PRAUC_ROCAUC_using_10F_validation(X, y, forest)
auboc_orig = calc_AUBOC_for_feat_imp(y, X, forest, feat_imp_temp_THOIPA_BO_curve_data_csv, feat_imp_temp_bocurve_data_xlsx, s, logging)
start = time.clock()
sys.stdout.write("\nmean : {:.03f}\n".format(pr_auc_orig)), sys.stdout.flush()
################### grouped features ###################
grouped_feat_decrease_PR_AUC_dict = {}
grouped_feat_decrease_ROC_AUC_dict = {}
grouped_feat_decrease_AUBOC_dict = {}
for feature_type in feature_types:
df_feat_selected = df_feat.loc[df_feat.feature_type == feature_type]
feature_list = df_feat_selected.feature.to_list()
feature_list = list(set(feature_list).intersection(set(X.columns.tolist())))
X_t = X.copy()
for feature in feature_list:
# shuffle the data for that feature
row_to_shuffle = X_t[feature].to_numpy()
np.random.shuffle(row_to_shuffle)
X_t[feature] = row_to_shuffle
# calculate prediction performance after shuffling
PR_AUC, ROC_AUC = calc_PRAUC_ROCAUC_using_10F_validation(X_t, y, forest)
decrease_PR_AUC = pr_auc_orig - PR_AUC
grouped_feat_decrease_PR_AUC_dict[feature_type] = decrease_PR_AUC
decrease_ROC_AUC = roc_auc_orig - ROC_AUC
grouped_feat_decrease_ROC_AUC_dict[feature_type] = decrease_ROC_AUC
auboc = calc_AUBOC_for_feat_imp(y, X_t, forest, feat_imp_temp_THOIPA_BO_curve_data_csv, feat_imp_temp_bocurve_data_xlsx, s, logging)
decrease_auboc = auboc_orig - auboc
grouped_feat_decrease_AUBOC_dict[feature_type] = decrease_auboc
logging.info(f"{feature_type} : decrease AUBOC ({decrease_auboc:.03f}), decrease PR-AUC ({decrease_PR_AUC:.03f}), "
f"decrease ROC-AUC ({decrease_ROC_AUC:.03f}), included features ({feature_list})")
# remove temp bocurve output files
feat_imp_temp_THOIPA_BO_curve_data_csv.unlink()
feat_imp_temp_bocurve_data_xlsx.unlink()
################### single features ###################
single_feat_decrease_PR_AUC_dict = {}
single_feat_decrease_ROC_AUC_dict = {}
single_feat_decrease_AUBOC_dict = {}
for feature in X.columns:
X_t = X.copy()
# shuffle the data for that feature
row_to_shuffle = X_t[feature].to_numpy()
np.random.shuffle(row_to_shuffle)
X_t[feature] = row_to_shuffle
# calculate prediction performance after shuffling
PR_AUC, ROC_AUC = calc_PRAUC_ROCAUC_using_10F_validation(X_t, y, forest)
decrease_PR_AUC = pr_auc_orig - PR_AUC
single_feat_decrease_PR_AUC_dict[feature] = decrease_PR_AUC
decrease_ROC_AUC = roc_auc_orig - ROC_AUC
single_feat_decrease_ROC_AUC_dict[feature] = decrease_ROC_AUC
auboc = calc_AUBOC_for_feat_imp(y, X_t, forest, feat_imp_temp_THOIPA_BO_curve_data_csv, feat_imp_temp_bocurve_data_xlsx, s, logging)
decrease_auboc = auboc_orig - auboc
single_feat_decrease_AUBOC_dict[feature] = decrease_auboc
logging.info(f" {feature} {decrease_auboc:.03f} | {decrease_PR_AUC:.03f} | {decrease_ROC_AUC:.03f}")
df_grouped_feat = pd.DataFrame()
df_grouped_feat["PR_AUC"] = pd.Series(grouped_feat_decrease_PR_AUC_dict)
df_grouped_feat["ROC_AUC"] = pd.Series(grouped_feat_decrease_ROC_AUC_dict)
df_grouped_feat["AUBOC"] = pd.Series(grouped_feat_decrease_AUBOC_dict)
df_grouped_feat.sort_values("AUBOC", ascending=False, inplace=True)
df_grouped_feat_norm = pd.DataFrame()
for col in df_grouped_feat.columns:
df_grouped_feat_norm[col] = utils.normalise_0_1(df_grouped_feat[col])[0]
df_single_feat = pd.DataFrame()
df_single_feat["PR_AUC"] = pd.Series(single_feat_decrease_PR_AUC_dict)
df_single_feat["ROC_AUC"] = pd.Series(single_feat_decrease_ROC_AUC_dict)
df_single_feat["AUBOC"] = pd.Series(single_feat_decrease_AUBOC_dict)
df_single_feat.sort_values("AUBOC", ascending=False, inplace=True)
df_single_feat_norm = pd.DataFrame()
for col in df_single_feat.columns:
df_single_feat_norm[col] = utils.normalise_0_1(df_single_feat[col])[0]
writer = pd.ExcelWriter(feat_imp_MDA_xlsx)
df_grouped_feat.to_excel(writer, sheet_name="grouped_feat")
df_grouped_feat_norm.to_excel(writer, sheet_name="grouped_feat_norm")
df_single_feat.to_excel(writer, sheet_name="single_feat")
df_single_feat_norm.to_excel(writer, sheet_name="single_feat_norm")
writer.save()
writer.close()
duration = time.clock() - start
logging.info('{} calc_feat_import_from_mean_decrease_accuracy. PR_AUC({:.3f}). Time taken = {:.2f}.\nFeatures: {}'.format(s["setname"], pr_auc_orig, duration, X.columns.tolist()))
logging.info(f'output: ({feat_imp_MDA_xlsx})')
logging.info('------------ finished calc_feat_import_from_mean_decrease_accuracy ------------')
def calc_AUBOC_for_feat_imp(y, X_t, forest, feat_imp_temp_THOIPA_BO_curve_data_csv, feat_imp_temp_bocurve_data_xlsx, s, logging):
THOIPA_BO_data_df = pd.DataFrame()
acc_db_list = pd.Series(X_t.index).str.split("_").str[0].unique().tolist()
sim_matrix_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/clusters/{s['setname']}_sim_matrix.xlsx"
putative_homologue_clusters = get_clusters_putative_homologues_in_protein_set(sim_matrix_xlsx)
for acc_db in acc_db_list:
clusters_containing_acc_db_of_interest = [c for c in putative_homologue_clusters if acc_db in c]
acc_db_putative_homologues: List[str] = clusters_containing_acc_db_of_interest[0]
rows_including_test_tmd = pd.Series(X_t.index).apply(lambda x: x.split("_")[0] in acc_db_putative_homologues).to_list()
rows_excluding_test_tmd = [not i for i in rows_including_test_tmd]
y_test_tmd = y.loc[rows_including_test_tmd]
y_excluding_test_tmd = y.loc[rows_excluding_test_tmd]
X_t_test_tmd = X_t.loc[rows_including_test_tmd]
X_t_excluding_test_tmd = X_t.loc[rows_excluding_test_tmd]
assert acc_db not in "".join(X_t_excluding_test_tmd.index.to_list())
probas_ = forest.fit(X_t_excluding_test_tmd, y_excluding_test_tmd).predict_proba(X_t_test_tmd)
experiment_data = y_test_tmd
prediction_data = probas_[:, 1]
THOIPA_BO_single_prot_df = calc_best_overlap(acc_db, experiment_data, prediction_data)
if THOIPA_BO_data_df.empty:
THOIPA_BO_data_df = THOIPA_BO_single_prot_df
else:
THOIPA_BO_data_df = pd.concat([THOIPA_BO_data_df, THOIPA_BO_single_prot_df], axis=1, join="outer")
THOIPA_BO_data_df.to_csv(feat_imp_temp_THOIPA_BO_curve_data_csv)
# THOIPA_linechart_mean_obs_and_rand = analyse_bo_curve_underlying_data(THOIPA_BO_curve_data_csv, BO_curve_folder, names_excel_path)
parse_BO_data_csv_to_excel(feat_imp_temp_THOIPA_BO_curve_data_csv, feat_imp_temp_bocurve_data_xlsx, s["n_residues_AUBOC_validation"], logging, log_auboc=False)
df_bocurve = pd.read_excel(feat_imp_temp_bocurve_data_xlsx, sheet_name="mean_o_minus_r", index_col=0)
# apply cutoff (e.g. 5 residues for AUBOC5)
n_residues_AUBOC_validation = s["n_residues_AUBOC_validation"]
df_bocurve = df_bocurve.iloc[:n_residues_AUBOC_validation]
AUBOC = np.trapz(y=df_bocurve["mean_o_minus_r"], x=df_bocurve.index)
return AUBOC
| [
"mark.teese@tum.de"
] | mark.teese@tum.de |
6377537fb4a12e481b8bc1eccef538726e98b2d7 | 75d930e35e6ae1fd0ec7be6a4a0893ca3d3c7eb6 | /extra/tigertool/ecusb/pty_driver.py | 2aec6fecad883b16e5ed4d0495f190bc9f1c70b5 | [
"BSD-3-Clause"
] | permissive | mirrexagon/chrome-ec | aa8aa862c5680b657074c97203f1690a87482cc5 | bf8b2dd9996bc633cec2942ebcdd1118313af654 | refs/heads/master | 2022-11-15T03:58:23.258930 | 2020-06-22T03:42:22 | 2020-06-22T09:10:39 | 274,113,110 | 0 | 0 | BSD-3-Clause | 2020-06-22T10:59:13 | 2020-06-22T10:59:13 | null | UTF-8 | Python | false | false | 9,006 | py | # Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ptyDriver class
This class takes a pty interface and can send commands and expect results
as regex. This is useful for automating console based interfaces, such as
the CrOS EC console commands.
"""
import ast
import errno
import fcntl
import os
import pexpect
from pexpect import fdpexpect
# Expecting a result in 3 seconds is plenty even for slow platforms.
DEFAULT_UART_TIMEOUT = 3  # seconds; default for _issue_cmd_get_results()
class ptyError(Exception):
  """Exception class for pty errors (failed writes, response timeouts)."""
# Template of per-instance UART control state; ptyDriver.__init__ stores a
# copy in self._dict.  Nothing in this file reads the first three entries --
# presumably they are set/queried by external control logic (TODO: confirm
# against callers).
UART_PARAMS = {
    'uart_cmd': None,
    'uart_multicmd': None,
    'uart_regexp': None,
    'uart_timeout': DEFAULT_UART_TIMEOUT,  # seconds to wait for a match
}
class ptyDriver(object):
"""Automate interactive commands on a pty interface."""
def __init__(self, interface, params, fast=False):
"""Init class variables."""
self._child = None
self._fd = None
self._interface = interface
self._pty_path = self._interface.get_pty()
self._dict = UART_PARAMS.copy()
self._fast = fast
  def __del__(self):
    """Best-effort cleanup when the driver is garbage collected."""
    self.close()
def close(self):
"""Close any open files and interfaces."""
if self._fd:
self._close()
self._interface.close()
  def _open(self):
    """Connect to the serial device and create the pexpect interface.

    Opens self._pty_path read/write and non-blocking, marks the fd
    close-on-exec, and wraps it in a pexpect fdspawn child.  Must not be
    called while a connection is already open (asserts self._fd is None).
    """
    assert self._fd is None
    # O_NONBLOCK so reads never stall the process; pexpect polls instead.
    self._fd = os.open(self._pty_path, os.O_RDWR | os.O_NONBLOCK)
    # Don't allow forked processes to access.
    fcntl.fcntl(self._fd, fcntl.F_SETFD,
                fcntl.fcntl(self._fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
    self._child = fdpexpect.fdspawn(self._fd)
    # pexpect defaults to a 100ms delay before sending characters, to
    # work around race conditions in ssh. We don't need this feature
    # so we'll change delaybeforesend from 0.1 to 0.001 to speed things up.
    if self._fast:
      self._child.delaybeforesend = 0.001
def _close(self):
"""Close serial device connection."""
os.close(self._fd)
self._fd = None
self._child = None
  def _flush(self):
    """Flush device output to prevent previous messages interfering.

    Sends a bare newline, then drains pending output one character at a
    time until a short read timeout (or EOF) indicates the channel is
    quiet.

    Raises:
      ptyError: if the newline could not be written.
      OSError: re-raised for any read error other than EAGAIN.
    """
    # sendline() returns the number of bytes written; a lone newline is 1.
    if self._child.sendline('') != 1:
      raise ptyError('Failed to send newline.')
    while True:
      try:
        self._child.expect('.', timeout=0.2)
      except (pexpect.TIMEOUT, pexpect.EOF):
        # Nothing more to read -- the channel is drained.
        break
      except OSError as e:
        # EAGAIN indicates no data available, maybe we didn't wait long enough.
        if e.errno != errno.EAGAIN:
          raise
        break
def _send(self, cmds):
"""Send command to EC.
This function always flushes serial device before sending, and is used as
a wrapper function to make sure the channel is always flushed before
sending commands.
Args:
cmds: The commands to send to the device, either a list or a string.
Raises:
ptyError: Raised when writing to the device fails.
"""
self._flush()
if not isinstance(cmds, list):
cmds = [cmds]
for cmd in cmds:
if self._child.sendline(cmd) != len(cmd) + 1:
raise ptyError('Failed to send command.')
  def _issue_cmd(self, cmds):
    """Send command to the device and do not wait for response.

    Args:
      cmds: The commands to send to the device, either a list or a string.
    """
    # Delegate with an empty regex list: send, then return immediately.
    self._issue_cmd_get_results(cmds, [])
def _issue_cmd_get_results(self, cmds,
                           regex_list, timeout=DEFAULT_UART_TIMEOUT):
    r"""Send command to the device and wait for response.

    This function waits for response messages matching regular
    expressions.

    Args:
      cmds: The commands issued, either a list or a string.
      regex_list: List of regular expressions used to match response
        messages. Note1: the list must be ordered. Note2: an empty list
        sends and returns immediately.
      timeout: Time to wait for matching results before failing.

    Returns:
      List of tuples, each of which contains the entire matched string and
      all the subgroups of the match. None if not matched.
      For example:
        response of the given command:
          High temp: 37.2
          Low temp: 36.4
        regex_list:
          ['High temp: (\d+)\.(\d+)', 'Low temp: (\d+)\.(\d+)']
        returns:
          [('High temp: 37.2', '37', '2'), ('Low temp: 36.4', '36', '4')]

    Raises:
      ptyError: If timed out waiting for a response.
    """
    result_list = []
    # The pty is opened per call and always closed again in the finally.
    self._open()
    try:
        self._send(cmds)
        for regex in regex_list:
            self._child.expect(regex, timeout)
            match = self._child.match
            lastindex = match.lastindex if match and match.lastindex else 0
            # Create a tuple which contains the entire matched string and all
            # the subgroups of the match.
            result = match.group(*range(lastindex + 1)) if match else None
            result_list.append(result)
    except pexpect.TIMEOUT:
        raise ptyError('Timeout waiting for response.')
    finally:
        self._close()
    return result_list
def _issue_cmd_get_multi_results(self, cmd, regex):
    """Send command to the device and wait for multiple responses.

    Repeatedly matches *regex* against the device output until a read
    times out, collecting an arbitrary number of matches.

    Args:
      cmd: The command issued.
      regex: Regular expression used to match response messages.

    Returns:
      List of tuples, each of which contains the entire matched string and
      all the subgroups of the match. None entries if not matched.
    """
    result_list = []
    self._open()
    try:
        self._send(cmd)
        while True:
            try:
                self._child.expect(regex, timeout=0.1)
                match = self._child.match
                lastindex = match.lastindex if match and match.lastindex else 0
                # Create a tuple which contains the entire matched string and all
                # the subgroups of the match.
                result = match.group(*range(lastindex + 1)) if match else None
                result_list.append(result)
            except pexpect.TIMEOUT:
                # No further output within 100ms: assume response complete.
                break
    finally:
        self._close()
    return result_list
def _Set_uart_timeout(self, timeout):
"""Set timeout value for waiting for the device response.
Args:
timeout: Timeout value in second.
"""
self._dict['uart_timeout'] = timeout
def _Get_uart_timeout(self):
"""Get timeout value for waiting for the device response.
Returns:
Timeout value in second.
"""
return self._dict['uart_timeout']
def _Set_uart_regexp(self, regexp):
"""Set the list of regular expressions which matches the command response.
Args:
regexp: A string which contains a list of regular expressions.
"""
if not isinstance(regexp, str):
raise ptyError('The argument regexp should be a string.')
self._dict['uart_regexp'] = ast.literal_eval(regexp)
def _Get_uart_regexp(self):
"""Get the list of regular expressions which matches the command response.
Returns:
A string which contains a list of regular expressions.
"""
return str(self._dict['uart_regexp'])
def _Set_uart_cmd(self, cmd):
"""Set the UART command and send it to the device.
If ec_uart_regexp is 'None', the command is just sent and it doesn't care
about its response.
If ec_uart_regexp is not 'None', the command is send and its response,
which matches the regular expression of ec_uart_regexp, will be kept.
Use its getter to obtain this result. If no match after ec_uart_timeout
seconds, a timeout error will be raised.
Args:
cmd: A string of UART command.
"""
if self._dict['uart_regexp']:
self._dict['uart_cmd'] = self._issue_cmd_get_results(
cmd, self._dict['uart_regexp'], self._dict['uart_timeout'])
else:
self._dict['uart_cmd'] = None
self._issue_cmd(cmd)
def _Set_uart_multicmd(self, cmds):
"""Set multiple UART commands and send them to the device.
Note that ec_uart_regexp is not supported to match the results.
Args:
cmds: A semicolon-separated string of UART commands.
"""
self._issue_cmd(cmds.split(';'))
def _Get_uart_cmd(self):
"""Get the result of the latest UART command.
Returns:
A string which contains a list of tuples, each of which contains the
entire matched string and all the subgroups of the match. 'None' if
the ec_uart_regexp is 'None'.
"""
return str(self._dict['uart_cmd'])
def _Set_uart_capture(self, cmd):
    """Set UART capture mode (on or off).

    Once capture is enabled, UART output can be collected periodically by
    invoking _Get_uart_stream() below.

    Args:
      cmd: True for on, False for off.
    """
    self._interface.set_capture_active(cmd)
def _Get_uart_capture(self):
    """Get the UART capture mode (True when capture is active)."""
    return self._interface.get_capture_active()
def _Get_uart_stream(self):
    """Return the UART output captured since the previous call."""
    return self._interface.get_stream()
| [
"chrome-bot@chromium.org"
] | chrome-bot@chromium.org |
e02e07efc8781878d94ad37a562af666faad436d | 87140007e96872d3611f0778eb0eebe5799616d7 | /runs/seq-nobro-iter05000.cfg.py | a151e8050cd85bb2c0c524781a03097f9710d643 | [
"MIT"
] | permissive | janpawellek/broeval | 49499fa302abff916ffced201034d3b9394503cd | 57e31aa6e354d0bba88103b44910483e8d982d00 | refs/heads/master | 2021-01-11T12:19:13.619220 | 2016-12-20T16:23:27 | 2016-12-20T16:23:27 | 76,468,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py |
# Benchmark run configuration: sequential connections, Bro disabled,
# 5000 iterations per repetition.

# Write results to this file (CSV)
OUTFILE = 'runs/seq-nobro-iter05000.result.csv'
# Source computers for the requests (client IPs)
SOURCE = ['10.0.0.1']
# Should Bro be enabled on the source machines? (one flag per source)
SOURCE_BRO = [False]
# Target machines for the requests (aka server IPs)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines? (one flag per target)
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 5000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
| [
"pawellek@stud.uni-heidelberg.de"
] | pawellek@stud.uni-heidelberg.de |
b993cb6e90864558f76aefb296699a34f5fea66e | 85a0ee08b54b2c5e3154e3727b92c37915b4c1de | /Sample/Python_Future_Sample/實單交易/63.py | 62772c75e90765557034eb58bfeb589df05361f8 | [] | no_license | yaotony/sandbox-empty | 877da9b9ba0ec658bbdc8acc79a97267f96408b9 | 85f04e5db5d26a04fad9ae4ad6d3c86977a9f865 | refs/heads/master | 2022-05-20T17:14:51.348759 | 2022-05-04T08:06:56 | 2022-05-04T08:06:56 | 35,472,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # -*- coding: UTF-8 -*-
# Load required packages (indicator/haohaninfo are vendor trading libs)
import sys,indicator,datetime,haohaninfo
# Broker identifier
Broker = 'Masterlink_Future'
# Data table to subscribe to (tick match data)
Table = 'match'
# Product (futures contract) name, taken from the command line
Prod = sys.argv[1]
# Today's date, formatted YYYYMMDD
Date = datetime.datetime.now().strftime("%Y%m%d")
# K-bar (candlestick) object: one bar per 100 units of traded volume
KBar = indicator.KBar(Date,'volume',100)
# Build volume-based K-bars from the live quote stream
GO = haohaninfo.GOrder.GOQuote()
for i in GO.Describe(Broker, Table, Prod):
    # NOTE(review): assumes i[2] is the match price and i[4] the match
    # volume — confirm against the haohaninfo tick format.
    price = int(i[2])
    amount = int(i[4])
    KBar.VolumeAdd(price,amount)
    # NOTE(review): indentation was lost in this snippet; the print is
    # assumed to run inside the loop (streaming OHLC per tick) — confirm
    # against the original source.
    print(KBar.GetOpen(),KBar.GetHigh(),KBar.GetLow(),KBar.GetClose())
| [
"tony.exmail@gmail.com"
] | tony.exmail@gmail.com |
d9881b41072708753168c8ca9fc7b2c4cd8b81d1 | fd8a4f9cc859f5edf468c1a74f6a183a4c1db508 | /shoulder/transform/abstract_transform.py | 34606b3276cdaca2fb9da5ebf347ebbd4c9cce8d | [
"MIT"
] | permissive | ainfosec/shoulder | e84d8c2b0ac8e049a21dabc7d221062edcb305d6 | 8dfb059701910a1cbe57e14c676ac8930f71b7c4 | refs/heads/master | 2020-09-08T07:43:43.418527 | 2019-09-25T17:48:08 | 2019-09-25T17:48:08 | 221,065,767 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | #
# Shoulder
# Copyright (C) 2018 Assured Information Security, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
from shoulder.logger import logger
class AbstractTransform(abc.ABC):
    """Base class for register transforms.

    Subclasses supply a human-readable description and a per-register
    transformation; transform() applies it across a whole list.
    """

    @property
    @abc.abstractmethod
    def description(self):
        """Human-readable description of what this transform does."""

    @abc.abstractmethod
    def do_transform(self, reg):
        """Transform the given register object and return the result."""
        return

    def transform(self, registers):
        """Apply this transform to every register and return a new list."""
        logger.info("Applying transform: {d}".format(d=str(self)))
        transformed = [self.do_transform(register) for register in registers]
        return transformed

    def __str__(self):
        # Transforms print as their description.
        return self.description
| [
"jared.wright12@gmail.com"
] | jared.wright12@gmail.com |
a5fa36dd2cf1c11d2ed3fe5a0d8f6917c5cf1351 | 2ea61e98627dd6b170590b69ead79a828614dec0 | /youtrack_api/model/bundle_project_custom_field.py | 361bfae9cbe767daec942d97686369ee6b3d6c85 | [] | no_license | alpduez/youtrack_api | 55dc25465f027645525efe5296c5699f7d824f33 | 2450523d87e6bdbbd53ca4908042a701a1a867e6 | refs/heads/master | 2023-09-01T01:34:33.356354 | 2021-10-20T15:32:05 | 2021-10-20T15:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,593 | py | """
YouTrack REST API
YouTrack issue tracking and project management system # noqa: E501
The version of the OpenAPI document: 2021.3
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from youtrack_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from youtrack_api.exceptions import ApiAttributeError
def lazy_import():
from youtrack_api.model.build_project_custom_field import BuildProjectCustomField
from youtrack_api.model.custom_field import CustomField
from youtrack_api.model.enum_project_custom_field import EnumProjectCustomField
from youtrack_api.model.owned_project_custom_field import OwnedProjectCustomField
from youtrack_api.model.project import Project
from youtrack_api.model.project_custom_field import ProjectCustomField
from youtrack_api.model.state_project_custom_field import StateProjectCustomField
from youtrack_api.model.user_project_custom_field import UserProjectCustomField
from youtrack_api.model.version_project_custom_field import VersionProjectCustomField
globals()['BuildProjectCustomField'] = BuildProjectCustomField
globals()['CustomField'] = CustomField
globals()['EnumProjectCustomField'] = EnumProjectCustomField
globals()['OwnedProjectCustomField'] = OwnedProjectCustomField
globals()['Project'] = Project
globals()['ProjectCustomField'] = ProjectCustomField
globals()['StateProjectCustomField'] = StateProjectCustomField
globals()['UserProjectCustomField'] = UserProjectCustomField
globals()['VersionProjectCustomField'] = VersionProjectCustomField
class BundleProjectCustomField(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'field': (CustomField,), # noqa: E501
'project': (Project,), # noqa: E501
'can_be_empty': (bool,), # noqa: E501
'empty_field_text': (str,), # noqa: E501
'ordinal': (int,), # noqa: E501
'is_public': (bool,), # noqa: E501
'has_running_job': (bool,), # noqa: E501
'id': (str,), # noqa: E501
'type': (str,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'BuildProjectCustomField': BuildProjectCustomField,
'EnumProjectCustomField': EnumProjectCustomField,
'OwnedProjectCustomField': OwnedProjectCustomField,
'StateProjectCustomField': StateProjectCustomField,
'UserProjectCustomField': UserProjectCustomField,
'VersionProjectCustomField': VersionProjectCustomField,
}
if not val:
return None
return {'type': val}
attribute_map = {
'field': 'field', # noqa: E501
'project': 'project', # noqa: E501
'can_be_empty': 'canBeEmpty', # noqa: E501
'empty_field_text': 'emptyFieldText', # noqa: E501
'ordinal': 'ordinal', # noqa: E501
'is_public': 'isPublic', # noqa: E501
'has_running_job': 'hasRunningJob', # noqa: E501
'id': 'id', # noqa: E501
'type': '$type', # noqa: E501
}
read_only_vars = {
'has_running_job', # noqa: E501
'id', # noqa: E501
'type', # noqa: E501
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""BundleProjectCustomField - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
field (CustomField): [optional] # noqa: E501
project (Project): [optional] # noqa: E501
can_be_empty (bool): [optional] # noqa: E501
empty_field_text (str): [optional] # noqa: E501
ordinal (int): [optional] # noqa: E501
is_public (bool): [optional] # noqa: E501
has_running_job (bool): [optional] # noqa: E501
id (str): [optional] # noqa: E501
type (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BundleProjectCustomField - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
field (CustomField): [optional] # noqa: E501
project (Project): [optional] # noqa: E501
can_be_empty (bool): [optional] # noqa: E501
empty_field_text (str): [optional] # noqa: E501
ordinal (int): [optional] # noqa: E501
is_public (bool): [optional] # noqa: E501
has_running_job (bool): [optional] # noqa: E501
id (str): [optional] # noqa: E501
type (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
ProjectCustomField,
],
'oneOf': [
],
}
| [
"hank@sellerlabs.com"
] | hank@sellerlabs.com |
b52409a23c489ce692b34cdd25b4c4737f26213e | e7e34e2726790686a1f239e22487fe7c957e179f | /tests/components/juicenet/test_config_flow.py | abda068b622bf9d45d9575b147cd892f53c3e702 | [
"Apache-2.0"
] | permissive | AlexxIT/home-assistant | 68a17b49644c5d943b204dc75e1f11fe3b701161 | 8de7966104911bca6f855a1755a6d71a07afb9de | refs/heads/dev | 2022-03-22T14:37:18.774214 | 2021-10-09T16:10:43 | 2021-10-09T16:10:43 | 100,278,871 | 9 | 0 | Apache-2.0 | 2022-01-31T06:18:02 | 2017-08-14T14:50:46 | Python | UTF-8 | Python | false | false | 4,171 | py | """Test the JuiceNet config flow."""
from unittest.mock import MagicMock, patch
import aiohttp
from pyjuicenet import TokenError
from homeassistant import config_entries
from homeassistant.components.juicenet.const import DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN
def _mock_juicenet_return_value(get_devices=None):
juicenet_mock = MagicMock()
type(juicenet_mock).get_devices = MagicMock(return_value=get_devices)
return juicenet_mock
async def test_form(hass):
    """Test we get the form and a successful user flow creates an entry."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # First step shows the empty form.
    assert result["type"] == "form"
    assert result["errors"] == {}

    # Patch the JuiceNet API call and the integration setup hooks so the
    # flow completes without network access.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        return_value=MagicMock(),
    ), patch(
        "homeassistant.components.juicenet.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.juicenet.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )
        await hass.async_block_till_done()

    # A valid token yields a config entry and triggers setup exactly once.
    assert result2["type"] == "create_entry"
    assert result2["title"] == "JuiceNet"
    assert result2["data"] == {CONF_ACCESS_TOKEN: "access_token"}
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # A TokenError from the API must surface as the "invalid_auth" error.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        side_effect=TokenError,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # A network-level aiohttp error must map to the "cannot_connect" error.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        side_effect=aiohttp.ClientError,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_catch_unknown_errors(hass):
    """Test an unexpected exception is reported as an "unknown" error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Any other exception type falls through to the generic "unknown" error.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        side_effect=Exception,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "unknown"}
async def test_import(hass):
    """Test that import (YAML) flow works as expected."""
    # Patch API access and setup hooks; the import source should create the
    # entry directly without showing a form.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        return_value=MagicMock(),
    ), patch(
        "homeassistant.components.juicenet.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.juicenet.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={CONF_ACCESS_TOKEN: "access_token"},
        )
        await hass.async_block_till_done()

    assert result["type"] == "create_entry"
    assert result["title"] == "JuiceNet"
    assert result["data"] == {CONF_ACCESS_TOKEN: "access_token"}
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
| [
"noreply@github.com"
] | AlexxIT.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.