blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3ce218574fdef85fc097503d423a3aab88bbc7ad | 8e25365117fdeb2e27a8d8207c6d150f839a57b0 | /sample_function.py | 2548d85a249f73d27f77b3891cc5cfccaa6739a9 | [] | no_license | rajagennu/python_hackerrank | 5feae3ee67f5f03613f365c788c1881b8ce6b691 | 863522cfa5365418019331e5042276cfbfaf4783 | refs/heads/master | 2022-11-18T13:23:20.165402 | 2020-07-18T16:08:20 | 2020-07-18T16:08:20 | 266,466,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from random import randint
def padding(modifier):
    """Return *modifier* increased by a random integer drawn from [1, 10]."""
    offset = randint(1, 10)
    return offset + modifier
# Demo call: print the padded value for a modifier of 5.
print(padding(5))
| [
"replituser@example.com"
] | replituser@example.com |
8eec87f0ec970c1c4218ea275e77e39df6aaba19 | 50914176887f9f21a3489a9407195ba14831354c | /guess_number_higher_or_lower.py | 526240182e29a6ff0eabd433fc3fab55af78b4ad | [] | no_license | nkukarl/leetcode | e8cfc2a31e64b68222ad7af631277f1f66d277bc | b1dbe37e8ca1c88714f91643085625ccced76e07 | refs/heads/master | 2021-01-10T05:42:04.022807 | 2018-02-24T03:55:24 | 2018-02-24T03:55:24 | 43,725,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | def guess(num):
N = 2
if N == num:
return 0
if num > N:
return -1
return 1
class Solution(object):
    """Binary search against the ``guess()`` judge API (LeetCode 374)."""

    def guess_number(self, n):
        """Return the hidden number picked from 1..n.

        Relies on the module-level ``guess(num)`` returning 0 when ``num``
        is the answer, -1 when ``num`` is too high, and 1 when too low.
        """
        lo, hi = 1, n
        while lo < hi:
            probe = lo + (hi - lo) // 2
            feedback = guess(probe)
            if feedback == 0:
                return probe
            elif feedback == -1:
                # probe is above the answer: shrink the upper bound.
                hi = probe
            else:
                # probe is below the answer: move the lower bound past it.
                lo = probe + 1
        return hi
| [
"kai.wang.nankai@gmail.com"
] | kai.wang.nankai@gmail.com |
9ed33d1e385c295eec8c16b6e9b9895bd7e82776 | 6b16ebc5eff0eca638959662319998e123bcea6e | /app/View/playing_interface/smallest_play_mode_interface.py | 129385810821115111c2b9988f907cb492c6fe40 | [] | no_license | wlmsoft/Groove | fa71964080343335beac0535e15dcc5d1ef76faa | da78c8bba159c67c7a25ba64dae060a577c816e8 | refs/heads/master | 2023-04-04T08:41:43.404671 | 2021-04-17T03:24:09 | 2021-04-17T03:24:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,098 | py | # coding:utf-8
from PyQt5.QtCore import (
QAbstractAnimation,
QEasingCurve,
QEvent,
QParallelAnimationGroup,
QPropertyAnimation,
QRect,
Qt,
)
from PyQt5.QtGui import QFont, QFontMetrics
from PyQt5.QtWidgets import QGraphicsOpacityEffect, QLabel, QSlider, QWidget
from .play_bar_buttons import BasicCircleButton
from .smallest_play_mode_buttons import PlayButton, SmallestPlayModeButton
class SmallestPlayModeInterface(QWidget):
    """ Smallest play mode interface: a compact player window showing a
    carousel of three song info cards (last / current / next), playback
    buttons and a thin progress bar. """

    # Carousel animation directions used as the value of ``self.loopMode``.
    CYCLE_LEFT_SHIFT = 0
    CYCLE_RIGHT_SHIFT = 1

    def __init__(self, playlist: list, parent=None):
        super().__init__(parent)
        self.playlist = playlist
        self.currentIndex = 0
        # Counters of how many cyclic shifts have happened in each direction;
        # __resetRef() uses them to work out which underlying card widget
        # currently plays the last/current/next role.
        self.shiftLeftTime = 0
        self.shiftRightTime = 0
        self.songInfoCard_list = []
        # Target indexes requested while a shift animation was still running;
        # they are replayed one at a time when the animation group finishes.
        self.__unCompleteShift_list = []
        # create buttons
        self.playButton = PlayButton(
            [
                r"app\resource\images\smallest_play_mode\播放_45_45.png",
                r"app\resource\images\smallest_play_mode\暂停_45_45.png",
            ],
            self,
        )
        self.lastSongButton = SmallestPlayModeButton(
            r"app\resource\images\smallest_play_mode\上一首_45_45.png", self
        )
        self.nextSongButton = SmallestPlayModeButton(
            r"app\resource\images\smallest_play_mode\下一首_45_45.png", self
        )
        self.exitSmallestModeButton = BasicCircleButton(
            r"app\resource\images\playing_interface\最小模式播放_47_47.png", self
        )
        self.progressBar = QSlider(Qt.Horizontal, self)
        self.aniGroup = QParallelAnimationGroup(self)
        # create the three song info cards
        self.__createSongInfoCards()
        # initialize the widget
        self.__initWidget()

    def __initWidget(self):
        """ Initialize the window: size limits, object names and signals. """
        self.resize(350, 350)
        self.setMinimumSize(206, 197)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setObjectName("smallestModeInterface")
        self.progressBar.setObjectName("smallestModeSlider")
        # Filter mouse presses so the user cannot seek by clicking the bar.
        self.progressBar.installEventFilter(self)
        # When a shift animation finishes, swap the card role references.
        self.aniGroup.finished.connect(self.__switchSongInfoCard)
        self.__setQss()

    def __createSongInfoCards(self):
        """ Create the song info cards and their geometry animations. """
        # three cards: last, current and next
        self.songInfoCard_list = [SongInfoCard(parent=self) for i in range(3)]
        # role references onto the underlying card widgets
        self.lastSongInfoCard = self.songInfoCard_list[0]  # type:SongInfoCard
        self.curSongInfoCard = self.songInfoCard_list[1]  # type:SongInfoCard
        self.nextSongInfoCard = self.songInfoCard_list[2]  # type:SongInfoCard
        # one geometry animation per card
        self.songInfoCardAni_list = [
            QPropertyAnimation(self.songInfoCard_list[i], b"geometry") for i in range(3)
        ]
        self.lastSongInfoCardAni = self.songInfoCardAni_list[0]
        self.curSongInfoCardAni = self.songInfoCardAni_list[1]
        self.nextSongInfoCardAni = self.songInfoCardAni_list[2]
        # add the animations to the parallel group
        for ani in self.songInfoCardAni_list:
            self.aniGroup.addAnimation(ani)
        # place the cards: last off-screen left, current centered, next off-screen right
        for i in range(3):
            self.songInfoCard_list[i].move((i - 1) * self.width(), self.height() - 106)
        if self.playlist:
            self.curSongInfoCard.updateCard(self.playlist[0])
            if len(self.playlist) >= 2:
                self.nextSongInfoCard.updateCard(self.playlist[1])

    def resizeEvent(self, e):
        """ Reposition buttons and cards when the window is resized. """
        self.progressBar.resize(self.width(), 5)
        self.progressBar.move(0, self.height() - 5)
        self.exitSmallestModeButton.move(
            self.width() - 7 - self.exitSmallestModeButton.width(),
            self.height() - 7 - self.exitSmallestModeButton.height(),
        )
        self.playButton.move(
            int(self.width() / 2 - self.playButton.width() / 2),
            int(self.height() / 2 - self.playButton.height() / 2),
        )
        self.lastSongButton.move(self.playButton.x() - 75, self.playButton.y())
        self.nextSongButton.move(self.playButton.x() + 75, self.playButton.y())
        # reposition every song info card
        for i in range(3):
            self.songInfoCard_list[i].resize(
                self.width(), self.songInfoCard_list[i].height()
            )
        self.curSongInfoCard.move(0, self.height() - 106)
        self.lastSongInfoCard.move(-self.width(), self.height() - 106)
        self.nextSongInfoCard.move(self.width(), self.height() - 106)
        # hide the cards when the window is too short to fit them
        if self.height() <= 320:
            if self.curSongInfoCard.isVisible():
                self.curSongInfoCard.aniHide()
            self.lastSongInfoCard.hide()
            self.nextSongInfoCard.hide()
        elif self.height() > 320:
            if not self.curSongInfoCard.isVisible():
                self.curSongInfoCard.aniShow()
            else:
                self.curSongInfoCard.show()
            self.lastSongInfoCard.show()
            self.nextSongInfoCard.show()

    def __setQss(self):
        """ Apply the stylesheet. """
        with open(r"app\resource\css\playInterface.qss", encoding="utf-8") as f:
            self.setStyleSheet(f.read())

    def eventFilter(self, obj, e: QEvent):
        """ Swallow mouse presses on the progress bar (display only). """
        if obj == self.progressBar:
            if e.type() in [QEvent.MouseButtonPress, QEvent.MouseButtonDblClick]:
                return True
        return super().eventFilter(obj, e)

    def __cycleLeftShift(self):
        """ Cyclic left shift: slide current out left, next into view. """
        self.loopMode = self.CYCLE_LEFT_SHIFT
        self.__setAnimation(
            self.curSongInfoCardAni, self.curSongInfoCard, -self.width()
        )
        self.__setAnimation(self.nextSongInfoCardAni, self.nextSongInfoCard, 0)
        # The "last" card does not move; keep it out of the running group so
        # the group's finished signal reflects only the moving cards.
        self.aniGroup.removeAnimation(self.lastSongInfoCardAni)
        self.aniGroup.start()

    def __cycleRightShift(self):
        """ Cyclic right shift: slide current out right, last into view. """
        self.loopMode = self.CYCLE_RIGHT_SHIFT
        self.__setAnimation(self.curSongInfoCardAni, self.curSongInfoCard, self.width())
        self.__setAnimation(self.lastSongInfoCardAni, self.lastSongInfoCard, 0)
        self.aniGroup.removeAnimation(self.nextSongInfoCardAni)
        self.aniGroup.start()

    def __setAnimation(self, animation: QPropertyAnimation, songInfoCard, endX):
        """ Configure a geometry animation sliding *songInfoCard* to *endX*. """
        animation.setEasingCurve(QEasingCurve.OutQuart)
        animation.setTargetObject(songInfoCard)
        animation.setDuration(500)
        # start from the card's current geometry
        animation.setStartValue(
            QRect(
                songInfoCard.x(),
                songInfoCard.y(),
                songInfoCard.width(),
                songInfoCard.height(),
            )
        )
        # end at the same y with the new x
        animation.setEndValue(
            QRect(endX, songInfoCard.y(), songInfoCard.width(), songInfoCard.height())
        )

    def __switchSongInfoCard(self):
        """ After a shift animation: swap role references onto the underlying
        cards, re-park the off-screen card and update the current index. """
        # cyclic left shift
        if self.loopMode == self.CYCLE_LEFT_SHIFT:
            self.__resetRef(moveDirection=0)
            # put the parked animation back into the group
            self.aniGroup.addAnimation(self.lastSongInfoCardAni)
            # move the recycled widget to the right, off-screen
            self.songInfoCard_list[[2, 0, 1][self.shiftLeftTime]].move(
                self.width(), self.height() - 106
            )
            # advance the index
            self.currentIndex += 1
            if self.currentIndex != len(self.playlist) - 1:
                # refresh all three cards
                self.updateCards()
        # cyclic right shift
        elif self.loopMode == self.CYCLE_RIGHT_SHIFT:
            self.__resetRef(moveDirection=1)
            self.aniGroup.addAnimation(self.nextSongInfoCardAni)
            # move the recycled widget to the left, off-screen
            self.songInfoCard_list[[0, 2, 1][self.shiftRightTime]].move(
                -self.width(), self.height() - 106
            )
            self.currentIndex -= 1
            if self.currentIndex != 0:
                self.updateCards()
        # replay any shift that was requested while we were animating
        if self.__unCompleteShift_list:
            index = self.__unCompleteShift_list.pop(0)
            self.__completeShift(index)

    def updateCards(self):
        """ Refresh the three cards from the playlist around currentIndex. """
        self.curSongInfoCard.updateCard(self.playlist[self.currentIndex])
        if self.currentIndex >= 1:
            self.lastSongInfoCard.updateCard(self.playlist[self.currentIndex - 1])
        if self.currentIndex <= len(self.playlist) - 2:
            self.nextSongInfoCard.updateCard(self.playlist[self.currentIndex + 1])

    def __resetRef(self, moveDirection=0):
        """ Re-point the last/cur/next references at the underlying widgets.
        moveDirection = 0 means a left shift, 1 means a right shift. """
        # cyclic left shift
        if moveDirection == 0:
            self.shiftLeftTime = (self.shiftLeftTime + 1) % 3
            self.shiftRightTime = (self.shiftRightTime - 1) % 3
            if self.shiftLeftTime == 0:
                self.__resetRefIndex(0, 1, 2)
            elif self.shiftLeftTime == 1:
                self.__resetRefIndex(1, 2, 0)
            elif self.shiftLeftTime == 2:
                self.__resetRefIndex(2, 0, 1)
        # cyclic right shift
        elif moveDirection == 1:
            self.shiftLeftTime = (self.shiftLeftTime - 1) % 3
            self.shiftRightTime = (self.shiftRightTime + 1) % 3
            if self.shiftRightTime == 0:
                self.__resetRefIndex(0, 1, 2)
            elif self.shiftRightTime == 1:
                self.__resetRefIndex(2, 0, 1)
            elif self.shiftRightTime == 2:
                self.__resetRefIndex(1, 2, 0)

    def __resetRefIndex(self, lastIndex, curIndex, nextIndex):
        """ Helper of __resetRef: bind the role references by list index. """
        self.curSongInfoCard = self.songInfoCard_list[curIndex]
        self.lastSongInfoCard = self.songInfoCard_list[lastIndex]
        self.nextSongInfoCard = self.songInfoCard_list[nextIndex]

    def setCurrentIndex(self, index):
        """ Set the current index, animating/updating the cards as needed. """
        if self.playlist:
            # a different index triggers a shift animation (queued if one is
            # already running)
            if index != self.currentIndex:
                if self.aniGroup.state() != QAbstractAnimation.Running:
                    self.__completeShift(index)
                else:
                    self.__unCompleteShift_list.append(index)
            elif index == self.currentIndex:
                self.updateCards()
                # NOTE(review): this flag is never read inside this class —
                # presumably consumed by the owner of the interface; confirm.
                self.needToEmitSignal = True

    def setPlaylist(self, playlist, isResetIndex: bool = True):
        """ Replace the playlist; optionally reset the current index to 0. """
        self.playlist = playlist
        self.currentIndex = 0 if isResetIndex else self.currentIndex
        if playlist:
            self.curSongInfoCard.updateCard(self.playlist[0])
            self.curSongInfoCard.show()
            if len(self.playlist) > 1:
                self.nextSongInfoCard.updateCard(self.playlist[1])
        else:
            self.curSongInfoCard.hide()

    def __completeShift(self, index):
        """ Perform one queued shift towards *index*; only called from
        setCurrentIndex()/__switchSongInfoCard(). Moves a single step: the
        index is pre-set to the neighbour so the shift lands on *index*. """
        if index > self.currentIndex:
            self.currentIndex = index - 1
            self.nextSongInfoCard.updateCard(self.playlist[index])
            self.__cycleLeftShift()
        elif index < self.currentIndex:
            self.currentIndex = index + 1
            self.lastSongInfoCard.updateCard(self.playlist[index])
            self.__cycleRightShift()
class SongInfoCard(QWidget):
    """ Song information card: shows the song title and artist name centered
    on two lines, with fade in/out animations via an opacity effect. """

    def __init__(self, songInfo: dict = None, parent=None):
        super().__init__(parent)
        self.resize(320, 55)
        # create child widgets
        self.songNameLabel = QLabel(self)
        self.songerNameLabel = QLabel(self)
        self.opacityEffect = QGraphicsOpacityEffect(self)
        self.ani = QPropertyAnimation(self.opacityEffect, b"opacity")
        # initialize
        self.__initWidget()
        # fill in the song information
        self.updateCard(songInfo)

    def __initWidget(self):
        """ Initialize the child widgets. """
        self.setFixedHeight(55)
        self.opacityEffect.setOpacity(1)
        self.setGraphicsEffect(self.opacityEffect)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.songNameLabel.setProperty("name", "smallestModeSongNameLabel")
        self.songerNameLabel.setProperty("name", "smallestModeSongerNameLabel")

    def __setSongInfo(self, songInfo: dict):
        """ Set the label texts from *songInfo*; falls back to the literal
        placeholders "未知歌曲"/"未知歌手" (unknown song/artist). """
        if not songInfo:
            songInfo = {}
        self.songName = songInfo.get("songName", "未知歌曲")  # type:str
        self.songerName = songInfo.get("songer", "未知歌手")  # type:str
        self.songNameLabel.setText(self.songName)
        self.songerNameLabel.setText(self.songerName)

    def updateCard(self, songInfo: dict):
        """ Update the card with new song info and re-layout the labels. """
        self.__setSongInfo(songInfo)
        self.__adjustLabel()

    def __adjustLabel(self):
        """ Truncate the label texts to the current width and center them. """
        fontMetrics = QFontMetrics(QFont("Microsoft YaHei", 12, 75))
        # maximum pixel width available for either string
        maxWidth = self.width() - 30
        songNameWidth, songerNameWidth = 0, 0
        # truncate the song name character by character
        for index, char in enumerate(self.songName):
            if songNameWidth + fontMetrics.width(char) > maxWidth:
                self.songNameLabel.setText(self.songName[:index])
                break
            songNameWidth += fontMetrics.width(char)
        self.songNameLabel.setFixedWidth(songNameWidth)
        # truncate the artist name (smaller font)
        fontMetrics = QFontMetrics(QFont("Microsoft YaHei", 11))
        for index, char in enumerate(self.songerName):
            if songerNameWidth + fontMetrics.width(char) > maxWidth:
                self.songerNameLabel.setText(self.songerName[:index])
                break
            songerNameWidth += fontMetrics.width(char)
        self.songerNameLabel.setFixedWidth(songerNameWidth)
        # center both labels horizontally
        self.songNameLabel.move(int(self.width() / 2 - songNameWidth / 2), 0)
        self.songerNameLabel.move(int(self.width() / 2 - songerNameWidth / 2), 30)

    def resizeEvent(self, e):
        """ Re-layout the labels whenever the card is resized. """
        super().resizeEvent(e)
        self.__adjustLabel()

    def aniHide(self):
        """ Fade out, then hide the card. """
        self.ani.setStartValue(1)
        self.ani.setEndValue(0)
        self.ani.setDuration(150)
        self.ani.finished.connect(self.__hideAniSlot)
        self.ani.start()

    def aniShow(self):
        """ Show the card, then fade it in. """
        super().show()
        self.ani.setStartValue(0)
        self.ani.setEndValue(1)
        self.ani.setDuration(150)
        self.ani.start()

    def __hideAniSlot(self):
        """ Fade-out finished: disconnect the one-shot slot and hide. """
        self.ani.disconnect()
        super().hide()
| [
"1319158137@qq.com"
] | 1319158137@qq.com |
d232aebeb075ae5e26d1a59f961d2a76c9897602 | 57eff74aacf082ef132d6944c0e4cd78fe6d29d7 | /tests/integration/templatetags/test_polls_tags.py | 960743983daa31ab044bb72c9558b8b0d0d67a89 | [
"BSD-3-Clause"
] | permissive | bitbike/django-machina | a3a850db5996907a38b48020b60e404b6f94ff59 | 0b772d5d3e107f41e901e4b488685ac179a10648 | refs/heads/master | 2020-04-03T04:17:30.900186 | 2018-10-24T02:47:33 | 2018-10-24T02:47:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,407 | py | import pytest
from django.contrib.auth.models import AnonymousUser
from django.template import Context
from django.template.base import Template
from django.test.client import RequestFactory
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.factories import GroupFactory
from machina.test.factories import PostFactory
from machina.test.factories import TopicPollFactory
from machina.test.factories import TopicPollOptionFactory
from machina.test.factories import TopicPollVoteFactory
from machina.test.factories import UserFactory
from machina.test.factories import create_category_forum
from machina.test.factories import create_forum
from machina.test.factories import create_topic
# Resolve machina's swappable models and permission helpers at import time.
Forum = get_model('forum', 'Forum')
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
assign_perm = get_class('forum_permission.shortcuts', 'assign_perm')
@pytest.mark.django_db
class BasePollsTagsTestCase(object):
    """Shared fixture for forum-poll template-tag tests: two users in one
    group, a moderator, two forums each holding a topic/post/poll, and the
    permissions needed to read the forums and vote in polls."""

    @pytest.fixture(autouse=True)
    def setup(self):
        self.loadstatement = '{% load forum_polls_tags %}'
        self.request_factory = RequestFactory()
        self.g1 = GroupFactory.create()
        self.u1 = UserFactory.create()
        self.u2 = UserFactory.create()
        self.u1.groups.add(self.g1)
        self.u2.groups.add(self.g1)
        self.moderators = GroupFactory.create()
        self.moderator = UserFactory.create()
        self.moderator.groups.add(self.moderators)
        self.superuser = UserFactory.create(is_superuser=True)
        # Permission handler
        self.perm_handler = PermissionHandler()
        # Set up a top-level category
        self.top_level_cat = create_category_forum()
        # Set up some forums
        self.forum_1 = create_forum(parent=self.top_level_cat)
        self.forum_2 = create_forum(parent=self.top_level_cat)
        # Set up some topics and posts (one poll per topic)
        self.forum_1_topic = create_topic(forum=self.forum_1, poster=self.u1)
        self.forum_2_topic = create_topic(forum=self.forum_2, poster=self.u2)
        self.post_1 = PostFactory.create(topic=self.forum_1_topic, poster=self.u1)
        self.post_2 = PostFactory.create(topic=self.forum_2_topic, poster=self.u2)
        self.poll_1 = TopicPollFactory.create(topic=self.forum_1_topic)
        self.poll_2 = TopicPollFactory.create(topic=self.forum_2_topic)
        # Assign some permissions (regular group on forum_1 only)
        assign_perm('can_see_forum', self.g1, self.forum_1)
        assign_perm('can_read_forum', self.g1, self.forum_1)
        assign_perm('can_edit_own_posts', self.g1, self.forum_1)
        assign_perm('can_delete_own_posts', self.g1, self.forum_1)
        assign_perm('can_reply_to_topics', self.g1, self.forum_1)
        assign_perm('can_see_forum', self.moderators, self.forum_1)
        assign_perm('can_read_forum', self.moderators, self.forum_1)
        assign_perm('can_edit_own_posts', self.moderators, self.forum_1)
        assign_perm('can_delete_own_posts', self.moderators, self.forum_1)
        assign_perm('can_edit_posts', self.moderators, self.forum_1)
        assign_perm('can_delete_posts', self.moderators, self.forum_1)
        assign_perm('can_vote_in_polls', self.g1, self.forum_1)
class TestHasBeenCompletedByTag(BasePollsTagsTestCase):
    """Tests for the ``has_been_completed_by`` template filter."""

    def test_can_tell_if_an_authenticated_user_has_already_voted_in_a_poll(self):
        # Helper: render the filter for *poll* as seen by *user*.
        def get_rendered(poll, user):
            request = self.request_factory.get('/')
            request.user = user
            t = Template(
                self.loadstatement + '{% if poll|has_been_completed_by:request.user %}HAS_VOTED'
                '{% else %}HAS_NOT_VOTED{% endif %}')
            c = Context({'poll': poll, 'request': request})
            rendered = t.render(c)
            return rendered
        # Setup: u1 votes on the first option of poll_1 only.
        poll_option_1 = TopicPollOptionFactory.create(poll=self.poll_1)
        TopicPollOptionFactory.create(poll=self.poll_1)
        TopicPollVoteFactory.create(poll_option=poll_option_1, voter=self.u1)
        # Run & check
        assert get_rendered(self.poll_1, self.u1) == 'HAS_VOTED'
        assert get_rendered(self.poll_2, self.u1) == 'HAS_NOT_VOTED'

    def test_can_if_an_anonymous_user_has_already_voted_in_a_poll(self):
        # Helper: render the filter for *poll* as seen by *user*.
        def get_rendered(poll, user):
            request = self.request_factory.get('/')
            request.user = user
            t = Template(self.loadstatement + '{% if poll|has_been_completed_by:request.user %}'
                         'HAS_VOTED{% else %}HAS_NOT_VOTED{% endif %}')
            c = Context({'poll': poll, 'request': request})
            rendered = t.render(c)
            return rendered
        # Setup: u2 carries the anonymous key used for the vote; u3 has none.
        u2 = AnonymousUser()
        u2.forum_key = 'dummy'
        u3 = AnonymousUser()
        poll_option_1 = TopicPollOptionFactory.create(poll=self.poll_1)
        TopicPollOptionFactory.create(poll=self.poll_1)
        TopicPollVoteFactory.create(poll_option=poll_option_1, anonymous_key='dummy')
        # Run & check
        assert get_rendered(self.poll_1, u2) == 'HAS_VOTED'
        assert get_rendered(self.poll_2, u2) == 'HAS_NOT_VOTED'
        # A keyless anonymous user never counts as having voted.
        # (The previous revision repeated this exact assertion twice; the
        # duplicate was removed. Possibly poll_1 was intended — TODO confirm.)
        assert get_rendered(self.poll_2, u3) == 'HAS_NOT_VOTED'
| [
"morgan.aubert@zoho.com"
] | morgan.aubert@zoho.com |
d7d0d39eadd20554ddc16e5bd2fc3b0ce398ec14 | eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e | /atcoder/aising2020/c.py | 8197aed672f741ca2fbecff7d9be69c326c2c4ba | [] | no_license | reo11/AtCoder | 4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9 | 69c6d67f05cb9190d8fb07204488cd7ce4d0bed2 | refs/heads/master | 2023-08-28T10:54:50.859288 | 2023-08-22T18:52:47 | 2023-08-22T18:52:47 | 162,085,118 | 4 | 0 | null | 2023-07-01T14:17:28 | 2018-12-17T06:31:10 | Python | UTF-8 | Python | false | false | 340 | py | n = int(input())
# Count, for every value 1..n, how many ordered triples (a, b, c) with
# 1 <= a, b, c <= 100 satisfy a^2 + b^2 + c^2 + ab + bc + ca == value.
counts = [0] * (10 ** 4 + 1)
for a in range(1, 101):
    for b in range(1, 101):
        for c in range(1, 101):
            value = a * a + b * b + c * c + a * b + b * c + c * a
            if value <= n:
                counts[value] += 1
# Emit one count per line for values 1..n.
print("\n".join(str(counts[i]) for i in range(1, n + 1)))
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
5d2768e0798d279f7587d06124782c383d8457ef | 9c8abb2013189633914073a668495122db0f77c1 | /settings.py | 37b1cc3ceb02f7fd5e1719a71c33eb74aed298a6 | [] | no_license | AshkenSC/Python-Space-Invaders | 160a63e88a74f0b21ea8a234c556c72feab75a54 | 84d7447b04065dd13b048d4f3583b81a31fbe8ea | refs/heads/master | 2020-04-15T10:17:16.399152 | 2019-01-30T15:27:20 | 2019-01-30T15:27:20 | 164,589,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,643 | py | class Settings():
'''the class to store all the settings of Space Invaders'''
    def __init__(self):
        '''initiate STATIC game settings'''
        # screen settings
        self.screen_width = 800
        self.screen_height = 600
        self.bg_color = (230, 230, 230)
        # player ship settings
        # NOTE(review): immediately overwritten to 1.5 by the
        # initialize_dynamic_settings() call at the end of __init__.
        self.ship_speed_factor = 2.0
        self.ship_limit = 3
        # bullet settings
        # NOTE(review): likewise overwritten (to 3) by
        # initialize_dynamic_settings() below.
        self.bullet_speed_factor = 2.5
        self.bullet_width = 200
        self.bullet_height = 18
        self.bullet_color = 255, 0, 0
        self.bullets_allowed = 5
        # alien settings
        self.fleet_drop_speed = 100
        # game DIFFICULTY speed-up scale
        self.speedup_scale = 1.1
        # alien POINTS speed-up scale
        # alien pts should increase along with difficulty variety
        self.score_scale = 1.5
        # call dynamic settings method
        self.initialize_dynamic_settings()
    def initialize_dynamic_settings(self):
        '''initialize DYNAMIC game settings'''
        # Values reset at the start of every game; increase_speed() scales
        # the three speed factors as the player clears fleets.
        self.ship_speed_factor = 1.5
        self.bullet_speed_factor = 3
        self.alien_speed_factor = 1
        # fleet_direction = 1: move towards right
        # fleet_direction = -1: move towards left
        self.fleet_direction = 1
        # SCORE record: points awarded per alien at base difficulty
        self.alien_points = 50
def increase_speed(self):
'''SPEED increase and alien SCORE settings'''
self.ship_speed_factor *= self.speedup_scale
self.bullet_speed_factor *= self.speedup_scale
self.alien_speed_factor *= self.speedup_scale
self.alien_points = int(self.alien_points * self.score_scale) | [
"393940378@qq.com"
] | 393940378@qq.com |
593c706d70c4ae49d0bcd2ce03ea406919a56f68 | 5b221c2809d82cf13a2b24a56589943315cdb381 | /2019/2019-9.py | 254abf9fce3f220ac0c39571eac5cd1d09c3d711 | [] | no_license | Bruce-V/CS-BM25 | c2cd797e9be2fc55af9c8944882fd55109ebee61 | 2401f0ddb24c1712b13c0c96e13565f60d48705d | refs/heads/main | 2023-01-04T23:29:20.906427 | 2020-11-09T08:44:22 | 2020-11-09T08:44:22 | 259,228,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,622 | py | # Copyright 2020 zicheng Zhang(18551701375@163.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pymongo
import re
from math import log
# MongoDB connection and the collection handles shared by the functions below.
myclient =pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["pubmed"]
mywords = mydb["freqwords3"]  # per-article word frequencies plus chemical / keyword / MeSH-heading lists
mytopic=mydb["topics2019"]  # topic -> related-article relevance judgements
mypapers=mydb["papers"]  # PubMed article metadata (not referenced in this script)
mytopicdb=myclient["cs2019_9"]
mydata=mytopicdb["cs2019_score_9"]  # second-pass ranking, adjusted by word-list length
mycount = mytopicdb["cs2019_score_9_related"]  # scored articles joined with topic relevance labels
def sortsecond(myfreq, mydata, yuzhi):
    """Score every article in *myfreq* against topic 9 (gastrointestinal
    stromal tumor, KIT exon 9 A502/Y503 duplication) and insert the
    documents whose score exceeds the threshold *yuzhi* into *mydata*.

    The total score is the sum of three parts:
      * a BM25 score over the abstract word frequencies,
      * a BM25 score over the chemical / MeSH-heading / keyword lists,
      * co-occurrence bonuses (the gx terms) granted when key topic
        concepts appear together in the abstract.

    Bug fix: the second half of the ``bm25_ab_score`` sum used to be a
    separate unary-``+`` expression statement (a no-op), so the exon, kit,
    502-503, a502 and ala502tyr503 terms were silently dropped from the
    abstract score; the sum is now a single parenthesized expression.
    The unused compiled regex ``cop`` was also removed.

    NOTE(review): the source this was recovered from had its indentation
    flattened; nesting of the counting/co-occurrence conditionals was
    reconstructed from the surrounding pattern — confirm against history.
    """
    k = 0  # progress counter: number of documents inserted so far
    # BM25 parameters for the abstract part (k1/b1) and the list part (k2/b2).
    k1 = 1.2
    b1 = 0.75
    k2 = 1.2
    b2 = 0.75
    # Pre-computed IDF weights; the document-frequency constants were counted
    # offline (29,138,919 abstracts for words, separate corpora for the
    # chemical / MeSH / keyword lists).
    idf_gastrointestinal = log((29138919 - 203456 + 0.5) / (203456 + 0.5), 10)
    idf_stromal = log((29138919 - 75278 + 0.5) / (75278 + 0.5), 10)
    idf_exon = log((29138919 - 62186 + 0.5) / (62186 + 0.5), 10)
    idf_kit = log((29138919 - 31493 + 0.5) / (31493 + 0.5), 10)
    idf_a502 = log((29138919 - 61 + 0.5) / (61 + 0.5), 10)
    idf_502503 = log((29138919 - 12 + 0.5) / (12 + 0.5), 10)
    idf_ala502tyr503 = log((29138919 - 1 + 0.5) / (1 + 0.5), 10)
    idf_ele_1 = log((13670358 - 0 + 0.5) / (367 + 0.5), 10)
    idf_ele_2 = log((13670358 - 0 + 0.5) / (0 + 0.5), 10)
    idf_ele_3 = log((13670358 - 0 + 0.5) / (367 + 0.5), 10)
    idf_ele_4 = log((13670358 - 0 + 0.5) / (0 + 0.5), 10)
    idf_ele_5 = log((13670358 - 0 + 0.5) / (0 + 0.5), 10)
    idf_eleM_1 = log((25389659 - 5700 + 0.5) / (5700 + 0.5), 10)
    idf_eleM_2 = log((25389659 - 46355 + 0.5) / (46355 + 0.5), 10)
    idf_eleM_3 = log((25389659 - 5457 + 0.5) / (5457 + 0.5), 10)
    idf_eleM_4 = log((25389659 - 11770 + 0.5) / (11770 + 0.5), 10)
    idf_eleM_5 = log((25389659 - 0 + 0.5) / (0 + 0.5), 10)
    idf_eleM_6 = log((25389659 - 17437618 + 0.5) / (17437618 + 0.5), 10)
    idf_eleM_7 = log((25389659 - 8002162 + 0.5) / (8002162 + 0.5), 10)
    # NOTE(review): idf_eleM_8 is defined but never applied below — possibly
    # a MeSH heading check was dropped; confirm against other topic scripts.
    idf_eleM_8 = log((25389659 - 2842020 + 0.5) / (2842020 + 0.5), 10)
    idf_eleM_9 = log((25389659 - 4029038 + 0.5) / (4029038 + 0.5), 10)
    idf_eleM_10 = log((25389659 - 4785026 + 0.5) / (4785026 + 0.5), 10)
    idf_eleK_1 = log((5435471 - 953 + 0.5) / (953 + 0.5), 10)
    idf_eleK_2 = log((5435471 - 0 + 0.5) / (0 + 0.5), 10)
    for x in myfreq.find({}, {'PMID', 'wordfreq', 'ChemicalNameList', 'MeshHeadingNameList', 'KeywordsList'},
                         no_cursor_timeout=True):
        ss1 = 0  # chemical-list IDF sum
        ss2 = 0  # MeSH-heading IDF sum
        ss4 = 0  # keyword IDF sum
        len_freq = 0  # abstract length in tokens
        gastrointestinal_score = 0
        stromal_score = 0
        exon_score = 0
        kit_score = 0
        a502_score = 0
        s502503_score = 0
        ala502tyr503_score = 0
        gx = 0
        gx1 = 0
        gx2 = 0  # reserved: never assigned in this topic's script
        gx3 = 0  # reserved: never assigned in this topic's script
        ChemicalNameList = x['ChemicalNameList']
        MeshHeadingNameList = x['MeshHeadingNameList']
        KeywordsList = x['KeywordsList']
        wordfreq = x['wordfreq']
        # Presence flags: non-empty list means the exact token occurs as a
        # key in the abstract frequency dict (tuple membership on items()).
        gastrointestinal = [True for x in wordfreq.items() if 'gastrointestinal' in x]
        stromal = [True for x in wordfreq.items() if 'stromal' in x]
        kit = [True for x in wordfreq.items() if 'kit' in x]
        exon = [True for x in wordfreq.items() if 'exon' in x]
        a502 = [True for x in wordfreq.items() if 'a502' in x]
        y503 = [True for x in wordfreq.items() if 'y503' in x]
        s502503 = [True for x in wordfreq.items() if '502-503' in x]
        ala502tyr503 = [True for x in wordfreq.items() if 'ala502tyr503' in x]
        # --------- abstract statistics ---------
        for key in wordfreq:
            len_freq = len_freq + wordfreq[key]
        for key in wordfreq:
            if 'gastrointestinal' in key:
                gastrointestinal_score = gastrointestinal_score + wordfreq[key]
        for key in wordfreq:
            if 'stromal' in key:
                stromal_score = stromal_score + wordfreq[key]
        for key in wordfreq:
            if 'kit' in key:
                kit_score = kit_score + wordfreq[key]
        for key in wordfreq:
            if 'exon' in key:
                exon_score = exon_score + wordfreq[key]
        # Count the mutation-notation variants only when both halves of the
        # duplication (a502 and y503) appear in the abstract.
        if len(a502) != 0 and a502[0] and len(y503) != 0 and y503[0]:
            for key in wordfreq:
                if 'a502' in key:
                    a502_score = a502_score + wordfreq[key]
            for key in wordfreq:
                if '502-503' in key:
                    s502503_score = s502503_score + wordfreq[key]
            for key in wordfreq:
                if 'ala502tyr503' in key:
                    ala502tyr503_score = ala502tyr503_score + wordfreq[key]
        # --------- co-occurrence analysis: abstract ---------
        if len(gastrointestinal) != 0 and gastrointestinal[0] and len(stromal) != 0 and stromal[0]:
            if len(kit) != 0 and kit[0] and len(exon) != 0 and exon[0]:
                gx = idf_kit + idf_exon
            # Later matches overwrite gx1, so the rarest notation wins.
            if len(a502) != 0 and a502[0] and len(y503) != 0 and y503[0]:
                gx1 = idf_a502
            if len(s502503) != 0 and s502503[0]:
                gx1 = idf_502503
            if len(ala502tyr503) != 0 and ala502tyr503[0]:
                gx1 = idf_ala502tyr503
        # --------- co-occurrence analysis: chemicals (unused here) ---------
        # --------- co-occurrence analysis: MeSH headings (unused here) ------
        # --------- co-occurrence analysis: keywords (unused here) -----------
        # BM25 term-frequency saturation per topic term; the abstract length
        # is normalized against an average of 83 tokens.
        bm25_gastrointestinal_score = (((k1 + 1) * gastrointestinal_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + gastrointestinal_score))
        bm25_stromal_score = (((k1 + 1) * stromal_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + stromal_score))
        bm25_exon_score = (((k1 + 1) * exon_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + exon_score))
        bm25_kit_score = (((k1 + 1) * kit_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + kit_score))
        bm25_s502503_score = (((k1 + 1) * s502503_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + s502503_score))
        bm25_a502_score = (((k1 + 1) * a502_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + a502_score))
        bm25_ala502tyr503_score = (((k1 + 1) * ala502tyr503_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + ala502tyr503_score))
        # FIX: previously the terms after idf_stromal were a separate no-op
        # statement ("+idf_exon * ..." on its own line); they now contribute.
        bm25_ab_score = (
            idf_gastrointestinal * bm25_gastrointestinal_score
            + idf_stromal * bm25_stromal_score
            + idf_exon * bm25_exon_score
            + idf_502503 * bm25_s502503_score
            + idf_kit * bm25_kit_score
            + idf_a502 * bm25_a502_score
            + idf_ala502tyr503 * bm25_ala502tyr503_score
        )
        # Diagnostic record: raw term counts keyed (as strings) to their IDF.
        idf_para = [{str(gastrointestinal_score): idf_gastrointestinal}, {str(stromal_score): idf_stromal},
                    {str(exon_score): idf_exon}, {str(s502503_score): idf_502503}, {str(kit_score): idf_kit},
                    {str(ala502tyr503_score): idf_ala502tyr503}, {str(a502_score): idf_a502}]
        # --------- chemical-substance list matches ---------
        for ele in ChemicalNameList:
            if 'Chromosome 9, trisomy' == ele['NameOfSubstance']:
                ss1 = ss1 + idf_ele_1
                break
        for ele in ChemicalNameList:
            if 'Trisomy' == ele['NameOfSubstance']:
                ss1 = ss1 + idf_ele_2
                break
        for ele in ChemicalNameList:
            if 'Chromosomes, Human, Pair 9' == ele['NameOfSubstance']:
                ss1 = ss1 + idf_ele_3
                break
        for ele in ChemicalNameList:
            if 'Exons' == ele['NameOfSubstance']:
                ss1 = ss1 + idf_ele_4
                break
        for ele in ChemicalNameList:
            if 'Gastrointestinal Stromal Tumors' == ele['NameOfSubstance']:
                ss1 = ss1 + idf_ele_5
                break
        # --------- MeSH-heading matches ---------
        for eleM in MeshHeadingNameList:
            if 'Gastrointestinal Stromal Tumors' == eleM['MeshHeadingName']:
                ss2 = ss2 + idf_eleM_1
                break
        for eleM in MeshHeadingNameList:
            if 'Exons' == eleM['MeshHeadingName']:
                ss2 = ss2 + idf_eleM_2
                break
        for eleM in MeshHeadingNameList:
            if 'Chromosomes, Human, Pair 9' == eleM['MeshHeadingName']:
                ss2 = ss2 + idf_eleM_3
                break
        for eleM in MeshHeadingNameList:
            if 'Trisomy' == eleM['MeshHeadingName']:
                ss2 = ss2 + idf_eleM_4
                break
        for eleM in MeshHeadingNameList:
            if 'Chromosome 9, trisomy' == eleM['MeshHeadingName']:
                ss2 = ss2 + idf_eleM_5
                break
        for eleM in MeshHeadingNameList:
            if re.findall(r'(Human|Humans)', eleM['MeshHeadingName']):
                ss2 = ss2 + idf_eleM_6
                break
        for eleM in MeshHeadingNameList:
            if 'Male' in eleM['MeshHeadingName']:
                ss2 = ss2 + idf_eleM_7
                break
        for eleM in MeshHeadingNameList:
            if 'Middle Aged' == eleM['MeshHeadingName']:
                ss2 = ss2 + idf_eleM_9
                break
        for eleM in MeshHeadingNameList:
            if re.findall(r'(Adult|Adults)', eleM['MeshHeadingName']):
                ss2 = ss2 + idf_eleM_10
                break
        # --------- keyword matches ---------
        for eleK in KeywordsList:
            if 'gastrointestinal stromal tumor' in str(eleK).lower():
                ss4 = ss4 + idf_eleK_1
                break
        for eleK in KeywordsList:
            if 'kit exon 9' in str(eleK).lower():
                ss4 = ss4 + idf_eleK_2
                break
        total_gx = (gx + gx1 + gx2 + gx3)
        cmk_len = len(ChemicalNameList) + len(MeshHeadingNameList) + len(KeywordsList)
        bm25_cmk_len = ss1 + ss2 + ss4
        # BM25 over the combined lists, normalized to an average length of 13.
        bm25_cmk_score = (((k2 + 1) * bm25_cmk_len) / ((k2 * (b2 + (1 - b2) * (cmk_len / 13))) + bm25_cmk_len))
        bm25_score = bm25_ab_score + bm25_cmk_score + total_gx
        # Only articles above the threshold are kept.
        if (bm25_score > yuzhi):
            mydict = {"PMID": x['PMID'], "ab_score": bm25_ab_score, "idf_para": idf_para,
                      "cmk_len": cmk_len, "cmk_freq": bm25_cmk_len, "bm25_cmk_score": bm25_cmk_score,
                      "gx": total_gx,
                      "bm25_score": bm25_score,
                      "ChemicalNameList": x['ChemicalNameList'],
                      "MeshHeadingNameList": x['MeshHeadingNameList'], "KeywordsList": x['KeywordsList']}
            y = mydata.insert_one(mydict)
            k = k + 1
            print(str(y) + '---------' + str(k))
def count(mysort, mycount, topic):
    """Copy every scored document from `mysort` into `mycount`, tagging it
    with the ground-truth relevance for `topic`.

    For each document in `mysort`, every entry in the module-level `mytopic`
    collection for the given topic with a matching PMID produces one record
    carrying that entry's `relate` value; documents with no matching
    judgement are written once with the sentinel related = -1.
    """
    fields = {'PMID', 'ab_score', 'idf_para', 'cmk_len', 'cmk_freq',
              'bm25_cmk_score', 'gx', 'bm25_score',
              'ChemicalNameList', 'MeshHeadingNameList', 'KeywordsList'}

    def make_record(doc, related):
        # Field order deliberately mirrors the original document layout.
        return {"PMID": doc['PMID'], "related": related, "ab_score": doc["ab_score"],
                "idf_para": doc['idf_para'],
                "cmk_len": doc['cmk_len'], "cmk_freq": doc['cmk_freq'],
                'bm25_cmk_score': doc['bm25_cmk_score'],
                'gx': doc['gx'],
                "bm25_score": doc['bm25_score'],
                "ChemicalNameList": doc['ChemicalNameList'],
                "MeshHeadingNameList": doc['MeshHeadingNameList'],
                "KeywordsList": doc['KeywordsList']}

    for doc in mysort.find({}, fields):
        matched = 0
        for judgement in mytopic.find({"topic": topic}, {'PMID', 'relate'}):
            if doc['PMID'] == judgement['PMID']:
                print(mycount.insert_one(make_record(doc, judgement['relate'])))
                matched += 1
        if matched == 0:
            # No judgement found for this PMID: record it as unjudged (-1).
            print(mycount.insert_one(make_record(doc, -1)))
if __name__ == '__main__':
    # Score all documents with the topic-8 query-term weights, then attach
    # relevance judgements stored under topic id "9".
    # NOTE(review): the numeric 8 vs. string "9" identifiers come straight
    # from the original call sites - confirm they refer to the same topic.
    sortsecond(mywords,mydata,8)
    count(mydata,mycount,"9")
| [
"1714624946@qq.com"
] | 1714624946@qq.com |
7020eb68faae0860c627ff2294f7634b5de374a3 | dfa8337a94c6cdc8347589a8daf75accf556dd49 | /.local/bin/pylint | 7e1094266bdd2ee60efac6f0cdfe474d15bf1839 | [] | no_license | momentum-morehouse/django-uptact-GabeJunior-1196 | 2689fb43fd36ab676d78fbbbbd7dcb758ef6a98e | 84964e3b9f98d3fe6642c5918ec9f506bb6dcf06 | refs/heads/master | 2022-11-14T18:26:57.941198 | 2020-07-12T22:09:20 | 2020-07-12T22:09:20 | 277,572,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | #!/opt/virtualenvs/python3/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_pylint())
| [
"replituser@example.com"
] | replituser@example.com | |
682721200239fbac0506bf9be3b4a686c6b4db46 | ed92b38fcb124629c67d93835425a33a717f7177 | /Tools/Scripts/libraries/webkitscmpy/webkitscmpy/test/canonicalize_unittest.py | d7179541e56298505cb90bd223c7a523a57895fc | [] | no_license | nikolaszimmermann/WebKitIgalia | 725ef1f00f9c77bb8762e4b9b46e691419513ebe | 8fc0a5537b76b55cf4f27a9892a6310cf28c7ade | refs/heads/main | 2022-07-31T20:38:37.715991 | 2021-09-11T08:59:37 | 2021-10-18T21:55:59 | 420,998,021 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,002 | py | # Copyright (C) 2020-2021 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from webkitcorepy import OutputCapture, testing
from webkitcorepy.mocks import Time as MockTime
from webkitscmpy import program, mocks, local, Commit, Contributor
class TestCanonicalize(testing.PathTestCase):
basepath = 'mock/repository'
def setUp(self):
super(TestCanonicalize, self).setUp()
os.mkdir(os.path.join(self.path, '.git'))
os.mkdir(os.path.join(self.path, '.svn'))
def test_invalid(self):
with OutputCapture(), mocks.local.Git(), mocks.local.Svn(self.path), MockTime:
self.assertEqual(1, program.main(
args=('canonicalize',),
path=self.path,
))
def test_no_commits(self):
with OutputCapture() as captured, mocks.local.Git(self.path), mocks.local.Svn(), MockTime:
self.assertEqual(0, program.main(
args=('canonicalize',),
path=self.path,
))
self.assertEqual(captured.stdout.getvalue(), 'No local commits to be edited\n')
def test_formated_identifier(self):
with OutputCapture() as captured, mocks.local.Git(self.path) as mock, mocks.local.Svn(), MockTime:
contirbutors = Contributor.Mapping()
contirbutors.create('\u017dan Dober\u0161ek', 'zdobersek@igalia.com')
mock.commits[mock.default_branch].append(Commit(
hash='38ea50d28ae394c9c8b80e13c3fb21f1c262871f',
branch=mock.default_branch,
author=Contributor('\u017dan Dober\u0161ek', emails=['zdobersek@igalia.com']),
identifier=mock.commits[mock.default_branch][-1].identifier + 1,
timestamp=1601669000,
message='New commit\n',
))
self.assertEqual(0, program.main(
args=('canonicalize', '-v',),
path=self.path,
contributors=contirbutors,
identifier_template='Canonical link: https://commits.webkit.org/{}',
))
commit = local.Git(self.path).commit(branch=mock.default_branch)
self.assertEqual(commit.author, contirbutors['zdobersek@igalia.com'])
self.assertEqual(commit.message, 'New commit\nCanonical link: https://commits.webkit.org/6@main')
self.assertEqual(
captured.stdout.getvalue(),
'Rewrite 38ea50d28ae394c9c8b80e13c3fb21f1c262871f (1/1) (--- seconds passed, remaining --- predicted)\n'
'Overwriting 38ea50d28ae394c9c8b80e13c3fb21f1c262871f\n'
'1 commit successfully canonicalized!\n',
)
def test_existing_identifier(self):
with OutputCapture() as captured, mocks.local.Git(self.path) as mock, mocks.local.Svn(), MockTime:
contirbutors = Contributor.Mapping()
contirbutors.create('Jonathan Bedard', 'jbedard@apple.com')
mock.commits[mock.default_branch].append(Commit(
hash='38ea50d28ae394c9c8b80e13c3fb21f1c262871f',
branch=mock.default_branch,
author=Contributor('Jonathan Bedard', emails=['jbedard@apple.com']),
identifier=mock.commits[mock.default_branch][-1].identifier + 1,
timestamp=1601668000,
message='New commit\nIdentifier: {}@{}'.format(
mock.commits[mock.default_branch][-1].identifier + 1,
mock.default_branch,
),
))
self.assertEqual(0, program.main(
args=('canonicalize', '-v',),
path=self.path,
contributors=contirbutors,
))
commit = local.Git(self.path).commit(branch=mock.default_branch)
self.assertEqual(commit.author, contirbutors['jbedard@apple.com'])
self.assertEqual(commit.message, 'New commit\nIdentifier: 6@main')
self.assertEqual(
captured.stdout.getvalue(),
'Rewrite 38ea50d28ae394c9c8b80e13c3fb21f1c262871f (1/1) (--- seconds passed, remaining --- predicted)\n'
'Overwriting 38ea50d28ae394c9c8b80e13c3fb21f1c262871f\n'
'1 commit successfully canonicalized!\n',
)
def test_git_svn(self):
with OutputCapture() as captured, mocks.local.Git(self.path, git_svn=True) as mock, mocks.local.Svn(), MockTime:
contirbutors = Contributor.Mapping()
contirbutors.create('Jonathan Bedard', 'jbedard@apple.com')
mock.commits[mock.default_branch].append(Commit(
hash='766609276fe201e7ce2c69994e113d979d2148ac',
branch=mock.default_branch,
author=Contributor('jbedard@apple.com', emails=['jbedard@apple.com']),
identifier=mock.commits[mock.default_branch][-1].identifier + 1,
timestamp=1601668000,
revision=9,
message='New commit\n',
))
self.assertEqual(0, program.main(
args=('canonicalize', '-vv'),
path=self.path,
contributors=contirbutors,
))
commit = local.Git(self.path).commit(branch=mock.default_branch)
self.assertEqual(commit.author, contirbutors['jbedard@apple.com'])
self.assertEqual(
commit.message,
'New commit\n'
'Identifier: 6@main\n'
'git-svn-id: https://svn.example.org/repository/repository/trunk@9 268f45cc-cd09-0410-ab3c-d52691b4dbfc',
)
self.assertEqual(
captured.stdout.getvalue(),
'Rewrite 766609276fe201e7ce2c69994e113d979d2148ac (1/1) (--- seconds passed, remaining --- predicted)\n'
'Overwriting 766609276fe201e7ce2c69994e113d979d2148ac\n'
' GIT_AUTHOR_NAME=Jonathan Bedard\n'
' GIT_AUTHOR_EMAIL=jbedard@apple.com\n'
' GIT_COMMITTER_NAME=Jonathan Bedard\n'
' GIT_COMMITTER_EMAIL=jbedard@apple.com\n'
'1 commit successfully canonicalized!\n',
)
def test_branch_commits(self):
with OutputCapture() as captured, mocks.local.Git(self.path) as mock, mocks.local.Svn(), MockTime:
contirbutors = Contributor.Mapping()
contirbutors.create('Jonathan Bedard', 'jbedard@apple.com')
local.Git(self.path).checkout('branch-a')
mock.commits['branch-a'].append(Commit(
hash='f93138e3bf1d5ecca25fc0844b7a2a78b8e00aae',
branch='branch-a',
author=Contributor('jbedard@apple.com', emails=['jbedard@apple.com']),
branch_point=mock.commits['branch-a'][-1].branch_point,
identifier=mock.commits['branch-a'][-1].identifier + 1,
timestamp=1601668000,
message='New commit 1\n',
))
mock.commits['branch-a'].append(Commit(
hash='0148c0df0faf248aa133d6d5ad911d7cb1b56a5b',
branch='branch-a',
author=Contributor('jbedard@apple.com', emails=['jbedard@apple.com']),
branch_point=mock.commits['branch-a'][-1].branch_point,
identifier=mock.commits['branch-a'][-1].identifier + 1,
timestamp=1601669000,
message='New commit 2\n',
))
self.assertEqual(0, program.main(
args=('canonicalize', ),
path=self.path,
contributors=contirbutors,
))
commit_a = local.Git(self.path).commit(branch='branch-a~1')
self.assertEqual(commit_a.author, contirbutors['jbedard@apple.com'])
self.assertEqual(commit_a.message, 'New commit 1\nIdentifier: 2.3@branch-a')
commit_b = local.Git(self.path).commit(branch='branch-a')
self.assertEqual(commit_b.author, contirbutors['jbedard@apple.com'])
self.assertEqual(commit_b.message, 'New commit 2\nIdentifier: 2.4@branch-a')
self.assertEqual(
captured.stdout.getvalue(),
'Rewrite f93138e3bf1d5ecca25fc0844b7a2a78b8e00aae (1/2) (--- seconds passed, remaining --- predicted)\n'
'Rewrite 0148c0df0faf248aa133d6d5ad911d7cb1b56a5b (2/2) (--- seconds passed, remaining --- predicted)\n'
'2 commits successfully canonicalized!\n',
)
def test_number(self):
with OutputCapture() as captured, mocks.local.Git(self.path) as mock, mocks.local.Svn(), MockTime:
contirbutors = Contributor.Mapping()
contirbutors.create('Jonathan Bedard', 'jbedard@apple.com')
self.assertEqual(0, program.main(
args=('canonicalize', '--number', '3'),
path=self.path,
contributors=contirbutors,
))
self.assertEqual(local.Git(self.path).commit(identifier='5@main').message, 'Patch Series\nIdentifier: 5@main')
self.assertEqual(local.Git(self.path).commit(identifier='4@main').message, '8th commit\nIdentifier: 4@main')
self.assertEqual(local.Git(self.path).commit(identifier='3@main').message, '4th commit\nIdentifier: 3@main')
self.assertEqual(
captured.stdout.getvalue(),
'Rewrite 1abe25b443e985f93b90d830e4a7e3731336af4d (1/3) (--- seconds passed, remaining --- predicted)\n'
'Rewrite bae5d1e90999d4f916a8a15810ccfa43f37a2fd6 (2/3) (--- seconds passed, remaining --- predicted)\n'
'Rewrite d8bce26fa65c6fc8f39c17927abb77f69fab82fc (3/3) (--- seconds passed, remaining --- predicted)\n'
'3 commits successfully canonicalized!\n',
)
| [
"jbedard@apple.com"
] | jbedard@apple.com |
2a6375e0b917dbc1b0e991dbae5f952e1487d58f | 50b2a866c65e925dde2802ec1541885a7b0edb64 | /src/server_config.py | f43169e8ef471e0f0bc2428f2849a184726f1c90 | [] | no_license | sandumjacob/Dynamic-NIDS-Evaluation-Utility | 68aacc97f120bdc04abd37c3ea8c055923509cf1 | a9f6b286c435a39eb8a452a1b360e36b822a0f7a | refs/heads/master | 2021-03-12T06:47:46.154350 | 2020-03-12T19:59:55 | 2020-03-12T19:59:55 | 246,598,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | SERVER_IP = '127.0.0.1'
# SERVER_IP = '10.0.0.8'
SERVER_PORT = 5000
SERVER_BUFFER_SIZE = 1024
# How many clients to wait for connection from
CLIENT_COUNT = 1
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
8cd208cef461d1bf7645a4c31f9e5b7b4541aa5a | 250962c80383ecf9c2f94e2874c1e1f961f6a181 | /escpos/constants.py | a239e30a932526ceba5155148dd630c29d55b29d | [
"Apache-2.0"
] | permissive | kmee/pyescpos | aa59d6f2d1a9c99d3d9a55da1e1c543c49105da3 | b0a0040cd770c1658258a870caca1a33ff010460 | refs/heads/master | 2023-07-07T09:02:41.581454 | 2022-04-14T19:05:17 | 2022-04-14T19:05:17 | 106,326,996 | 0 | 1 | Apache-2.0 | 2022-04-20T18:29:21 | 2017-10-09T19:34:38 | Python | UTF-8 | Python | false | false | 1,339 | py | # -*- coding: utf-8 -*-
#
# escpos/constants.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
DEFAULT_ENCODING = 'utf-8'
DEFAULT_ENCODING_ERRORS = 'strict'
CASHDRAWER_DEFAULT_DURATION = 200
"""Duration for cash drawer activation (kick) in milliseconds.
See :meth:`~escpos.impl.epson.GenericESCPOS.kick_drawer` method for details.
"""
BACKOFF_DEFAULT_MAXTRIES = 3
"""Number of tries before give up. See :func:`escpos.retry.backoff`"""
BACKOFF_DEFAULT_DELAY = 3
"""Delay between retries (in seconds). See :func:`escpos.retry.backoff`"""
BACKOFF_DEFAULT_FACTOR = 2
"""Multiply factor in which delay will be increased for the next retry.
See :func:`escpos.retry.backoff`.
"""
| [
"daniel@base4.com.br"
] | daniel@base4.com.br |
d67b04b1475e4e9ef6b21c70a5a033fc6675fea6 | e1ba0eb26a14be38c1b2fdf0902417d2552be913 | /REM/Tool/ghidra_9.1.2_PUBLIC/Ghidra/Features/Python/data/jython-2.7.1/Lib/BaseHTTPServer.py | 4e7ef9338fe77f110316df75d309a49a73e0985f | [
"Apache-2.0",
"LGPL-2.1-only",
"MIT"
] | permissive | dodieboy/Np_class | db7fb9d4f4eb0fe4d5bb38dc05e69a9cbd485cf0 | 5c6e517599a20658ee23bdbdd76f197f2f4a1b63 | refs/heads/master | 2022-11-15T10:29:30.587639 | 2022-06-24T12:25:45 | 2022-06-24T12:25:45 | 181,395,891 | 0 | 2 | MIT | 2020-05-30T06:22:17 | 2019-04-15T02:07:40 | C# | UTF-8 | Python | false | false | 22,905 | py | """HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts). It does, however, optionally implement HTTP/1.1
persistent connections, as of version 0.3.
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.3"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
import time
import socket # For gethostbyaddr()
from warnings import filterwarnings, catch_warnings
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
import SocketServer
# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
DEFAULT_ERROR_CONTENT_TYPE = "text/html"
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class HTTPServer(SocketServer.TCPServer):
    """TCPServer subclass that records server_name/server_port after bind."""

    allow_reuse_address = 1    # Seems to make sense in testing environment

    def server_bind(self):
        """Override server_bind to store the server name."""
        SocketServer.TCPServer.server_bind(self)
        try:
            host, port = self.socket.getsockname()[:2]
            self.server_name = socket.getfqdn(host)
            self.server_port = port
        except socket.error:
            # NOTE(review): on Jython the underlying socket may not be
            # materialized yet (see server_activate below), in which case
            # getsockname() fails here and the attributes are filled in
            # by server_activate() instead.
            pass

    def server_activate(self):
        """Activate the server, then (re)record the bound name and port."""
        SocketServer.TCPServer.server_activate(self)
        # Adding a second call to getsockname() because of this issue
        # http://wiki.python.org/jython/NewSocketModule#Deferredsocketcreationonjython
        host, port = self.socket.getsockname()[:2]
        self.server_name = socket.getfqdn(host)
        self.server_port = port
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of mimetools.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to HTTP 0.9, i.e. don't send a status line.
default_request_version = "HTTP/0.9"
def parse_request(self):
    """Parse a request (internal).

    The request should be stored in self.raw_requestline; the results
    are in self.command, self.path, self.request_version and
    self.headers.

    Return True for success, False for failure; on failure, an
    error is sent back.
    """
    self.command = None  # set in case of error on the first line
    self.request_version = version = self.default_request_version
    self.close_connection = 1
    requestline = self.raw_requestline
    requestline = requestline.rstrip('\r\n')
    self.requestline = requestline
    words = requestline.split()
    if len(words) == 3:
        # Full request line: <command> <path> HTTP/<major>.<minor>
        command, path, version = words
        if version[:5] != 'HTTP/':
            self.send_error(400, "Bad request version (%r)" % version)
            return False
        try:
            base_version_number = version.split('/', 1)[1]
            version_number = base_version_number.split(".")
            # RFC 2145 section 3.1 says there can be only one "." and
            #   - major and minor numbers MUST be treated as
            #      separate integers;
            #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
            #      turn is lower than HTTP/12.3;
            #   - Leading zeros MUST be ignored by recipients.
            if len(version_number) != 2:
                raise ValueError
            version_number = int(version_number[0]), int(version_number[1])
        except (ValueError, IndexError):
            self.send_error(400, "Bad request version (%r)" % version)
            return False
        if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
            # Both sides speak at least HTTP/1.1: connections are
            # persistent by default.
            self.close_connection = 0
        if version_number >= (2, 0):
            self.send_error(505,
                            "Invalid HTTP Version (%s)" % base_version_number)
            return False
    elif len(words) == 2:
        # Two-token request line: an HTTP/0.9 "simple request" (GET only,
        # no headers, connection always closed after the response).
        command, path = words
        self.close_connection = 1
        if command != 'GET':
            self.send_error(400,
                            "Bad HTTP/0.9 request type (%r)" % command)
            return False
    elif not words:
        # Blank request line: not a request at all; no error is sent.
        return False
    else:
        self.send_error(400, "Bad request syntax (%r)" % requestline)
        return False
    self.command, self.path, self.request_version = command, path, version

    # Examine the headers and look for a Connection directive
    self.headers = self.MessageClass(self.rfile, 0)

    # An explicit Connection header overrides the version-based default.
    conntype = self.headers.get('Connection', "")
    if conntype.lower() == 'close':
        self.close_connection = 1
    elif (conntype.lower() == 'keep-alive' and
          self.protocol_version >= "HTTP/1.1"):
        self.close_connection = 0
    return True
def handle_one_request(self):
    """Handle a single HTTP request.

    You normally don't need to override this method; see the class
    __doc__ string for information on how to handle specific HTTP
    commands such as GET and POST.

    """
    try:
        # 65537 = 64K limit + 1 so an over-long request line is detectable.
        self.raw_requestline = self.rfile.readline(65537)
        if len(self.raw_requestline) > 65536:
            # Request line too long: reject with 414 before any parsing.
            self.requestline = ''
            self.request_version = ''
            self.command = ''
            self.send_error(414)
            return
        if not self.raw_requestline:
            # EOF: the peer closed the connection.
            self.close_connection = 1
            return
        if not self.parse_request():
            # An error code has been sent, just exit
            return
        # Dispatch to do_<COMMAND>() defined on a subclass, if any.
        mname = 'do_' + self.command
        if not hasattr(self, mname):
            self.send_error(501, "Unsupported method (%r)" % self.command)
            return
        method = getattr(self, mname)
        method()
        self.wfile.flush() #actually send the response if not already done.
    except socket.timeout, e:
        # A read or a write timed out.  Discard this connection.
        self.log_error("Request timed out: %r", e)
        self.close_connection = 1
        return
def handle(self):
    """Serve requests on this connection until it should be closed.

    Always processes at least one request; keeps going for as long as
    handle_one_request() leaves self.close_connection false (HTTP/1.1
    keep-alive).
    """
    self.close_connection = 1
    while True:
        self.handle_one_request()
        if self.close_connection:
            break
def send_error(self, code, message=None):
    """Send and log an error reply.

    Arguments are the error code, and a detailed message.
    The detailed message defaults to the short entry matching the
    response code.

    This sends an error response (so it must be called before any
    output has been generated), logs the error, and finally sends
    a piece of HTML explaining the error to the user.

    """
    try:
        short, long = self.responses[code]
    except KeyError:
        # Unknown status code: fall back to placeholder texts.
        short, long = '???', '???'
    if message is None:
        message = short
    explain = long
    self.log_error("code %d, message %s", code, message)
    # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
    content = (self.error_message_format %
               {'code': code, 'message': _quote_html(message), 'explain': explain})
    self.send_response(code, message)
    self.send_header("Content-Type", self.error_content_type)
    self.send_header('Connection', 'close')
    self.end_headers()
    # HEAD responses and 204/304 statuses must not carry a message body.
    if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
        self.wfile.write(content)
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
def send_response(self, code, message=None):
    """Send the response header and log the response code.

    Also send two standard headers with the server software
    version and the current date.

    """
    self.log_request(code)
    if message is None:
        # Use the short description from the responses table, if known.
        if code in self.responses:
            message = self.responses[code][0]
        else:
            message = ''
    if self.request_version != 'HTTP/0.9':
        # HTTP/0.9 responses have no status line (or headers) at all.
        self.wfile.write("%s %d %s\r\n" %
                         (self.protocol_version, code, message))
        # print (self.protocol_version, code, message)
    self.send_header('Server', self.version_string())
    self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
    """Send one MIME header line and track Connection semantics."""
    # HTTP/0.9 responses carry no headers at all.
    if self.request_version != 'HTTP/0.9':
        self.wfile.write("%s: %s\r\n" % (keyword, value))

    # A Connection header also updates the keep-alive bookkeeping.
    if keyword.lower() != 'connection':
        return
    token = value.lower()
    if token == 'close':
        self.close_connection = 1
    elif token == 'keep-alive':
        self.close_connection = 0
def end_headers(self):
    """Terminate the header section with the required blank line."""
    if self.request_version == 'HTTP/0.9':
        # HTTP/0.9: no headers were sent, so no terminator either.
        return
    self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
    """Record one accepted request; invoked from send_response().

    `code` is the status code and `size` the body byte count; both
    default to '-' (unknown) in the common-log style.
    """
    status, nbytes = str(code), str(size)
    self.log_message('"%s" %s %s', self.requestline, status, nbytes)
def log_error(self, format, *args):
    """Log an error.

    This is called when a request cannot be fulfilled.  By
    default it passes the message on to log_message().

    Arguments are the same as for log_message().

    XXX This should go to the separate error log.

    """
    # `format` shadows the builtin; the name is kept for API compatibility
    # with log_message().
    self.log_message(format, *args)
def log_message(self, format, *args):
    """Log an arbitrary message.

    This is used by all other logging functions.  Override
    it if you have specific logging wishes.

    The first argument, FORMAT, is a format string for the
    message to be logged.  If the format string contains
    any % escapes requiring parameters, they should be
    specified as subsequent arguments (it's just like
    printf!).

    The client ip address and current date/time are prefixed to
    every message.  Output goes unconditionally to sys.stderr.

    """
    sys.stderr.write("%s - - [%s] %s\n" %
                     (self.client_address[0],
                      self.log_date_time_string(),
                      format%args))
def version_string(self):
    """Return the Server header value: software version + Python version."""
    return " ".join((self.server_version, self.sys_version))
def date_time_string(self, timestamp=None):
    """Format *timestamp* (default: now) as an RFC 1123 HTTP-date (GMT)."""
    when = time.time() if timestamp is None else timestamp
    year, month, day, hh, mm, ss, wd = time.gmtime(when)[:7]
    # The HTTP date format mandates English day/month names, so index the
    # class tables rather than using locale-dependent strftime().
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        self.weekdayname[wd], day, self.monthname[month], year, hh, mm, ss)
def log_date_time_string(self):
    """Return the current local time formatted for log lines."""
    year, month, day, hh, mm, ss = time.localtime(time.time())[:6]
    # Common-log style: dd/Mon/yyyy hh:mm:ss, English month name.
    return "%02d/%3s/%04d %02d:%02d:%02d" % (
        day, self.monthname[month], year, hh, mm, ss)
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
    """Return the client address formatted for logging.

    Resolves the client host through socket.getfqdn(), which may
    perform a blocking reverse-DNS lookup; override to return `host`
    directly if that latency matters.
    """
    host, port = self.client_address[:2]
    return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this resource.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
def test(HandlerClass = BaseHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """Test the HTTP request handler class.
    This runs an HTTP server on port 8000 (or the first command line
    argument).
    """
    # Port may be overridden by the first command-line argument.
    if sys.argv[1:]:
        port = int(sys.argv[1])
    else:
        port = 8000
    # Empty host string binds to all local interfaces.
    server_address = ('', port)
    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)
    # Report the address actually bound (useful when port was 0 or defaulted).
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    # Blocks forever, dispatching requests to HandlerClass.
    httpd.serve_forever()
# Allow running this module directly as a simple HTTP server.
if __name__ == '__main__':
    test()
| [
"dodieboy@users.noreply.github.com"
] | dodieboy@users.noreply.github.com |
e1bfe9e9f183400da3f8fca486753fb5c5098df0 | afe4f8281c1b80621c8b682d22aed6bfc8d15fc7 | /blogaccountsite/wsgi.py | ec4aecfe41d47ac1b3b5ed242f544180a30a2e7d | [] | no_license | mahidul-islam/oj | 9242f1cd2ce9a2520a2c63321be7728bb33d756c | fab39fa4bac6cb8ad6d72cb139a22afa75b1fd56 | refs/heads/master | 2022-12-12T07:16:03.097332 | 2018-07-04T13:57:36 | 2018-07-04T13:57:36 | 139,730,070 | 1 | 0 | null | 2021-09-07T23:56:02 | 2018-07-04T14:04:45 | CSS | UTF-8 | Python | false | false | 499 | py | """
WSGI config for blogaccountsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise

# The settings module must be configured before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blogaccountsite.settings")

# Build the Django WSGI app first, then wrap it with WhiteNoise so static
# files are served by the WSGI layer. The original code called
# DjangoWhiteNoise(application) before `application` was defined (NameError
# at import time) and then overwrote the wrapper with the bare Django app.
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| [
"mizihan84@gmail.com"
] | mizihan84@gmail.com |
60e7b3178a56b624693b275db2e1fde0e14c68ce | 692b907d07eee8ce3ee32a1fda74b6d92fd6c548 | /tests/cli/v1_3_3/test_device_onboarding_pnp.py | 0ac37f9bcc1140404f5e92da4fcad93eb834bc6b | [
"MIT"
] | permissive | AltusConsulting/dnacentercli | 04c9c7d00b25753a26c643994388dd4e23bf4c54 | 26ea46fdbd40fc30649ea1d8803158655aa545aa | refs/heads/master | 2022-12-16T04:50:30.076420 | 2020-07-17T22:12:39 | 2020-07-17T22:12:39 | 212,206,213 | 0 | 0 | MIT | 2022-12-08T06:39:49 | 2019-10-01T21:50:42 | Python | UTF-8 | Python | false | false | 43,318 | py | # -*- coding: utf-8 -*-
"""DNACenterAPI Device Onboarding (PnP) API fixtures and tests.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import click
import pytest
from json import loads
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.3.3', reason='version does not match')
def is_valid_get_sync_result_for_virtual_account(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_sync_result_for_virtual_account(runner, cli, auth_options):
    """Run `device-onboarding-pnp get-sync-result-for-virtual-account`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-sync-result-for-virtual-account',
                                 """--domain='string'""",
                                 """--name='string'"""])
    assert not result.exception
    assert is_valid_get_sync_result_for_virtual_account(result)
def is_valid_un_claim_device(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_un_claim_device(runner, cli, auth_options):
    """Run `device-onboarding-pnp un-claim-device`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'un-claim-device',
                                 """--active_validation=True""",
                                 """--deviceidlist='string'""",
                                 """--payload=None"""])
    assert not result.exception
    assert is_valid_un_claim_device(result)
def is_valid_update_device(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_update_device(runner, cli, auth_options):
    """Run `device-onboarding-pnp update-device` with sample JSON payloads; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'update-device',
                                 """--_id='string'""",
                                 """--active_validation=True""",
                                 """--deviceinfo='{"aaaCredentials": {"password": "string", "username": "string"}, "addedOn": 0, "addnMacAddrs": ["string"], "agentType": "POSIX", "authStatus": "string", "authenticatedSudiSerialNo": "string", "capabilitiesSupported": ["string"], "cmState": "NotContacted", "description": "string", "deviceSudiSerialNos": ["string"], "deviceType": "string", "featuresSupported": ["string"], "fileSystemList": [{"freespace": 0, "name": "string", "readable": true, "size": 0, "type": "string", "writeable": true}], "firstContact": 0, "hostname": "string", "httpHeaders": [{"key": "string", "value": "string"}], "imageFile": "string", "imageVersion": "string", "ipInterfaces": [{"ipv4Address": {}, "ipv6AddressList": [{}], "macAddress": "string", "name": "string", "status": "string"}], "lastContact": 0, "lastSyncTime": 0, "lastUpdateOn": 0, "location": {"address": "string", "altitude": "string", "latitude": "string", "longitude": "string", "siteId": "string"}, "macAddress": "string", "mode": "string", "name": "string", "neighborLinks": [{"localInterfaceName": "string", "localMacAddress": "string", "localShortInterfaceName": "string", "remoteDeviceName": "string", "remoteInterfaceName": "string", "remoteMacAddress": "string", "remotePlatform": "string", "remoteShortInterfaceName": "string", "remoteVersion": "string"}], "onbState": "NotContacted", "pid": "string", "pnpProfileList": [{"createdBy": "string", "discoveryCreated": true, "primaryEndpoint": {"certificate": "string", "fqdn": "string", "ipv4Address": {}, "ipv6Address": {}, "port": 0, "protocol": "string"}, "profileName": "string", "secondaryEndpoint": {"certificate": "string", "fqdn": "string", "ipv4Address": {}, "ipv6Address": {}, "port": 0, "protocol": "string"}}], "populateInventory": true, "preWorkflowCliOuputs": [{"cli": "string", "cliOutput": "string"}], "projectId": "string", "projectName": "string", "reloadRequested": true, "serialNumber": "string", "smartAccountId": "string", "source": "string", "stack": true, 
"stackInfo": {"isFullRing": true, "stackMemberList": [{"hardwareVersion": "string", "licenseLevel": "string", "licenseType": "string", "macAddress": "string", "pid": "string", "priority": 0, "role": "string", "serialNumber": "string", "softwareVersion": "string", "stackNumber": 0, "state": "string", "sudiSerialNumber": "string"}], "stackRingProtocol": "string", "supportsStackWorkflows": true, "totalMemberCount": 0, "validLicenseLevels": ["string"]}, "state": "Unclaimed", "sudiRequired": true, "tags": {}, "userSudiSerialNos": ["string"], "virtualAccountId": "string", "workflowId": "string", "workflowName": "string"}'""",
                                 """--id='string'""",
                                 """--payload=None""",
                                 """--runsummarylist='{"details": "string", "errorFlag": true, "historyTaskInfo": {"addnDetails": [{"key": "string", "value": "string"}], "name": "string", "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}, "timestamp": 0}'""",
                                 """--systemresetworkflow='{"_id": "string", "addToInventory": true, "addedOn": 0, "configId": "string", "currTaskIdx": 0, "description": "string", "endTime": 0, "execTime": 0, "imageId": "string", "instanceType": "SystemWorkflow", "lastupdateOn": 0, "name": "string", "startTime": 0, "state": "string", "tasks": [{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}], "tenantId": "string", "type": "string", "useState": "string", "version": 0}'""",
                                 """--systemworkflow='{"_id": "string", "addToInventory": true, "addedOn": 0, "configId": "string", "currTaskIdx": 0, "description": "string", "endTime": 0, "execTime": 0, "imageId": "string", "instanceType": "SystemWorkflow", "lastupdateOn": 0, "name": "string", "startTime": 0, "state": "string", "tasks": [{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}], "tenantId": "string", "type": "string", "useState": "string", "version": 0}'""",
                                 """--tenantid='string'""",
                                 """--version=0""",
                                 """--workflow='{"_id": "string", "addToInventory": true, "addedOn": 0, "configId": "string", "currTaskIdx": 0, "description": "string", "endTime": 0, "execTime": 0, "imageId": "string", "instanceType": "SystemWorkflow", "lastupdateOn": 0, "name": "string", "startTime": 0, "state": "string", "tasks": [{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}], "tenantId": "string", "type": "string", "useState": "string", "version": 0}'""",
                                 """--workflowparameters='{"configList": [{"configId": "string", "configParameters": [{"key": "string", "value": "string"}]}], "licenseLevel": "string", "licenseType": "string", "topOfStackSerialNumber": "string"}'"""])
    assert not result.exception
    assert is_valid_update_device(result)
def is_valid_import_devices_in_bulk(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_import_devices_in_bulk(runner, cli, auth_options):
    """Run `device-onboarding-pnp import-devices-in-bulk` with a sample payload; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'import-devices-in-bulk',
                                 """--active_validation=True""",
                                 """--payload='{"_id": "string", "deviceInfo": {"aaaCredentials": {"password": "string", "username": "string"}, "addedOn": 0, "addnMacAddrs": ["string"], "agentType": "POSIX", "authStatus": "string", "authenticatedSudiSerialNo": "string", "capabilitiesSupported": ["string"], "cmState": "NotContacted", "description": "string", "deviceSudiSerialNos": ["string"], "deviceType": "string", "featuresSupported": ["string"], "fileSystemList": [{"freespace": 0, "name": "string", "readable": true, "size": 0, "type": "string", "writeable": true}], "firstContact": 0, "hostname": "string", "httpHeaders": [{"key": "string", "value": "string"}], "imageFile": "string", "imageVersion": "string", "ipInterfaces": [{"ipv4Address": {}, "ipv6AddressList": [{}], "macAddress": "string", "name": "string", "status": "string"}], "lastContact": 0, "lastSyncTime": 0, "lastUpdateOn": 0, "location": {"address": "string", "altitude": "string", "latitude": "string", "longitude": "string", "siteId": "string"}, "macAddress": "string", "mode": "string", "name": "string", "neighborLinks": [{"localInterfaceName": "string", "localMacAddress": "string", "localShortInterfaceName": "string", "remoteDeviceName": "string", "remoteInterfaceName": "string", "remoteMacAddress": "string", "remotePlatform": "string", "remoteShortInterfaceName": "string", "remoteVersion": "string"}], "onbState": "NotContacted", "pid": "string", "pnpProfileList": [{"createdBy": "string", "discoveryCreated": true, "primaryEndpoint": {"certificate": "string", "fqdn": "string", "ipv4Address": {}, "ipv6Address": {}, "port": 0, "protocol": "string"}, "profileName": "string", "secondaryEndpoint": {"certificate": "string", "fqdn": "string", "ipv4Address": {}, "ipv6Address": {}, "port": 0, "protocol": "string"}}], "populateInventory": true, "preWorkflowCliOuputs": [{"cli": "string", "cliOutput": "string"}], "projectId": "string", "projectName": "string", "reloadRequested": true, "serialNumber": "string", "smartAccountId": "string", "source": 
"string", "stack": true, "stackInfo": {"isFullRing": true, "stackMemberList": [{"hardwareVersion": "string", "licenseLevel": "string", "licenseType": "string", "macAddress": "string", "pid": "string", "priority": 0, "role": "string", "serialNumber": "string", "softwareVersion": "string", "stackNumber": 0, "state": "string", "sudiSerialNumber": "string"}], "stackRingProtocol": "string", "supportsStackWorkflows": true, "totalMemberCount": 0, "validLicenseLevels": ["string"]}, "state": "Unclaimed", "sudiRequired": true, "tags": {}, "userSudiSerialNos": ["string"], "virtualAccountId": "string", "workflowId": "string", "workflowName": "string"}, "runSummaryList": [{"details": "string", "errorFlag": true, "historyTaskInfo": {"addnDetails": [{"key": "string", "value": "string"}], "name": "string", "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}, "timestamp": 0}], "systemResetWorkflow": {"_id": "string", "addToInventory": true, "addedOn": 0, "configId": "string", "currTaskIdx": 0, "description": "string", "endTime": 0, "execTime": 0, "imageId": "string", "instanceType": "SystemWorkflow", "lastupdateOn": 0, "name": "string", "startTime": 0, "state": "string", "tasks": [{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}], "tenantId": "string", "type": "string", "useState": "string", "version": 0}, "systemWorkflow": {"_id": "string", "addToInventory": true, "addedOn": 0, "configId": "string", "currTaskIdx": 0, "description": "string", "endTime": 0, "execTime": 0, "imageId": "string", "instanceType": "SystemWorkflow", "lastupdateOn": 0, "name": "string", "startTime": 0, "state": "string", "tasks": [{"currWorkItemIdx": 0, "endTime": 0, "name": 
"string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}], "tenantId": "string", "type": "string", "useState": "string", "version": 0}, "tenantId": "string", "version": 0, "workflow": {"_id": "string", "addToInventory": true, "addedOn": 0, "configId": "string", "currTaskIdx": 0, "description": "string", "endTime": 0, "execTime": 0, "imageId": "string", "instanceType": "SystemWorkflow", "lastupdateOn": 0, "name": "string", "startTime": 0, "state": "string", "tasks": [{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}], "tenantId": "string", "type": "string", "useState": "string", "version": 0}, "workflowParameters": {"configList": [{"configId": "string", "configParameters": [{"key": "string", "value": "string"}]}], "licenseLevel": "string", "licenseType": "string", "topOfStackSerialNumber": "string"}}'"""])
    assert not result.exception
    assert is_valid_import_devices_in_bulk(result)
def is_valid_add_virtual_account(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_add_virtual_account(runner, cli, auth_options):
    """Run `device-onboarding-pnp add-virtual-account`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'add-virtual-account',
                                 """--active_validation=True""",
                                 """--autosyncperiod=0""",
                                 """--ccouser='string'""",
                                 """--expiry=0""",
                                 """--lastsync=0""",
                                 """--payload=None""",
                                 """--profile='{"addressFqdn": "string", "addressIpV4": "string", "cert": "string", "makeDefault": true, "name": "string", "port": 0, "profileId": "string", "proxy": true}'""",
                                 """--smartaccountid='string'""",
                                 """--syncresult='{"syncList": [{"deviceSnList": ["string"], "syncType": "Add"}], "syncMsg": "string"}'""",
                                 """--syncresultstr='string'""",
                                 """--syncstarttime=0""",
                                 """--syncstatus='NOT_SYNCED'""",
                                 """--tenantid='string'""",
                                 """--token='string'""",
                                 """--virtualaccountid='string'"""])
    assert not result.exception
    assert is_valid_add_virtual_account(result)
def is_valid_update_workflow(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_update_workflow(runner, cli, auth_options):
    """Run `device-onboarding-pnp update-workflow`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'update-workflow',
                                 """--_id='string'""",
                                 """--active_validation=True""",
                                 """--addtoinventory=True""",
                                 """--addedon=0""",
                                 """--configid='string'""",
                                 """--currtaskidx=0""",
                                 """--description='string'""",
                                 """--endtime=0""",
                                 """--exectime=0""",
                                 """--id='string'""",
                                 """--imageid='string'""",
                                 """--instancetype='SystemWorkflow'""",
                                 """--lastupdateon=0""",
                                 """--name='string'""",
                                 """--payload=None""",
                                 """--starttime=0""",
                                 """--state='string'""",
                                 """--tasks='{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}'""",
                                 """--tenantid='string'""",
                                 """--type='string'""",
                                 """--usestate='string'""",
                                 """--version=0"""])
    assert not result.exception
    assert is_valid_update_workflow(result)
def is_valid_deregister_virtual_account(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_deregister_virtual_account(runner, cli, auth_options):
    """Run `device-onboarding-pnp deregister-virtual-account`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'deregister-virtual-account',
                                 """--domain='string'""",
                                 """--name='string'"""])
    assert not result.exception
    assert is_valid_deregister_virtual_account(result)
def is_valid_get_smart_account_list(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_smart_account_list(runner, cli, auth_options):
    """Run `device-onboarding-pnp get-smart-account-list`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-smart-account-list',
                                 """--"""])
    assert not result.exception
    assert is_valid_get_smart_account_list(result)
def is_valid_claim_a_device_to_a_site(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_claim_a_device_to_a_site(runner, cli, auth_options):
    """Run `device-onboarding-pnp claim-a-device-to-a-site`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'claim-a-device-to-a-site',
                                 """--active_validation=True""",
                                 """--deviceid='string'""",
                                 """--payload=None""",
                                 """--siteid='string'""",
                                 """--type='Default'"""])
    assert not result.exception
    assert is_valid_claim_a_device_to_a_site(result)
def is_valid_update_pnp_server_profile(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_update_pnp_server_profile(runner, cli, auth_options):
    """Run `device-onboarding-pnp update-pnp-server-profile`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'update-pnp-server-profile',
                                 """--active_validation=True""",
                                 """--autosyncperiod=0""",
                                 """--ccouser='string'""",
                                 """--expiry=0""",
                                 """--lastsync=0""",
                                 """--payload=None""",
                                 """--profile='{"addressFqdn": "string", "addressIpV4": "string", "cert": "string", "makeDefault": true, "name": "string", "port": 0, "profileId": "string", "proxy": true}'""",
                                 """--smartaccountid='string'""",
                                 """--syncresult='{"syncList": [{"deviceSnList": ["string"], "syncType": "Add"}], "syncMsg": "string"}'""",
                                 """--syncresultstr='string'""",
                                 """--syncstarttime=0""",
                                 """--syncstatus='NOT_SYNCED'""",
                                 """--tenantid='string'""",
                                 """--token='string'""",
                                 """--virtualaccountid='string'"""])
    assert not result.exception
    assert is_valid_update_pnp_server_profile(result)
def is_valid_get_pnp_global_settings(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_pnp_global_settings(runner, cli, auth_options):
    """Run `device-onboarding-pnp get-pnp-global-settings`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-pnp-global-settings',
                                 """--"""])
    assert not result.exception
    assert is_valid_get_pnp_global_settings(result)
def is_valid_get_workflow_count(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_workflow_count(runner, cli, auth_options):
    """Run `device-onboarding-pnp get-workflow-count`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-workflow-count',
                                 """--name='value1,value2'"""])
    assert not result.exception
    assert is_valid_get_workflow_count(result)
def is_valid_get_virtual_account_list(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_virtual_account_list(runner, cli, auth_options):
    """Run `device-onboarding-pnp get-virtual-account-list`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-virtual-account-list',
                                 """--domain='string'"""])
    assert not result.exception
    assert is_valid_get_virtual_account_list(result)
def is_valid_get_workflow_by_id(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_workflow_by_id(runner, cli, auth_options):
    """Run `device-onboarding-pnp get-workflow-by-id`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-workflow-by-id',
                                 """--id='string'"""])
    assert not result.exception
    assert is_valid_get_workflow_by_id(result)
def is_valid_add_a_workflow(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_add_a_workflow(runner, cli, auth_options):
    """Run `device-onboarding-pnp add-a-workflow`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'add-a-workflow',
                                 """--_id='string'""",
                                 """--active_validation=True""",
                                 """--addtoinventory=True""",
                                 """--addedon=0""",
                                 """--configid='string'""",
                                 """--currtaskidx=0""",
                                 """--description='string'""",
                                 """--endtime=0""",
                                 """--exectime=0""",
                                 """--imageid='string'""",
                                 """--instancetype='SystemWorkflow'""",
                                 """--lastupdateon=0""",
                                 """--name='string'""",
                                 """--payload=None""",
                                 """--starttime=0""",
                                 """--state='string'""",
                                 """--tasks='{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}'""",
                                 """--tenantid='string'""",
                                 """--type='string'""",
                                 """--usestate='string'""",
                                 """--version=0"""])
    assert not result.exception
    assert is_valid_add_a_workflow(result)
def is_valid_update_pnp_global_settings(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_update_pnp_global_settings(runner, cli, auth_options):
    """Run `device-onboarding-pnp update-pnp-global-settings`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'update-pnp-global-settings',
                                 """--_id='string'""",
                                 """--aaacredentials='{"password": "string", "username": "string"}'""",
                                 """--accepteula=True""",
                                 """--active_validation=True""",
                                 """--defaultprofile='{"cert": "string", "fqdnAddresses": ["string"], "ipAddresses": ["string"], "port": 0, "proxy": true}'""",
                                 """--payload=None""",
                                 """--savamappinglist='{"autoSyncPeriod": 0, "ccoUser": "string", "expiry": 0, "lastSync": 0, "profile": {"addressFqdn": "string", "addressIpV4": "string", "cert": "string", "makeDefault": true, "name": "string", "port": 0, "profileId": "string", "proxy": true}, "smartAccountId": "string", "syncResult": {"syncList": [{"deviceSnList": ["string"], "syncType": "Add"}], "syncMsg": "string"}, "syncResultStr": "string", "syncStartTime": 0, "syncStatus": "NOT_SYNCED", "tenantId": "string", "token": "string", "virtualAccountId": "string"}'""",
                                 """--tasktimeouts='{"configTimeOut": 0, "generalTimeOut": 0, "imageDownloadTimeOut": 0}'""",
                                 """--tenantid='string'""",
                                 """--version=0"""])
    assert not result.exception
    assert is_valid_update_pnp_global_settings(result)
def is_valid_reset_device(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_reset_device(runner, cli, auth_options):
    """Run `device-onboarding-pnp reset-device`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'reset-device',
                                 """--active_validation=True""",
                                 """--deviceresetlist='{"configList": [{"configId": "string", "configParameters": [{"key": "string", "value": "string"}]}], "deviceId": "string", "licenseLevel": "string", "licenseType": "string", "topOfStackSerialNumber": "string"}'""",
                                 """--payload=None""",
                                 """--projectid='string'""",
                                 """--workflowid='string'"""])
    assert not result.exception
    assert is_valid_reset_device(result)
def is_valid_sync_virtual_account_devices(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_sync_virtual_account_devices(runner, cli, auth_options):
    """Run `device-onboarding-pnp sync-virtual-account-devices`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'sync-virtual-account-devices',
                                 """--active_validation=True""",
                                 """--autosyncperiod=0""",
                                 """--ccouser='string'""",
                                 """--expiry=0""",
                                 """--lastsync=0""",
                                 """--payload=None""",
                                 """--profile='{"addressFqdn": "string", "addressIpV4": "string", "cert": "string", "makeDefault": true, "name": "string", "port": 0, "profileId": "string", "proxy": true}'""",
                                 """--smartaccountid='string'""",
                                 """--syncresult='{"syncList": [{"deviceSnList": ["string"], "syncType": "Add"}], "syncMsg": "string"}'""",
                                 """--syncresultstr='string'""",
                                 """--syncstarttime=0""",
                                 """--syncstatus='NOT_SYNCED'""",
                                 """--tenantid='string'""",
                                 """--token='string'""",
                                 """--virtualaccountid='string'"""])
    assert not result.exception
    assert is_valid_sync_virtual_account_devices(result)
def is_valid_delete_workflow_by_id(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_delete_workflow_by_id(runner, cli, auth_options):
    """Run `device-onboarding-pnp delete-workflow-by-id`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'delete-workflow-by-id',
                                 """--id='string'"""])
    assert not result.exception
    assert is_valid_delete_workflow_by_id(result)
def is_valid_get_workflows(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_workflows(runner, cli, auth_options):
    """Run `device-onboarding-pnp get-workflows`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-workflows',
                                 """--limit=0""",
                                 """--name='value1,value2'""",
                                 """--offset=0""",
                                 """--sort='value1,value2'""",
                                 """--sort_order='string'""",
                                 """--type='value1,value2'"""])
    assert not result.exception
    assert is_valid_get_workflows(result)
def is_valid_preview_config(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_preview_config(runner, cli, auth_options):
    """Run `device-onboarding-pnp preview-config`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'preview-config',
                                 """--active_validation=True""",
                                 """--deviceid='string'""",
                                 """--payload=None""",
                                 """--siteid='string'""",
                                 """--type='Default'"""])
    assert not result.exception
    assert is_valid_preview_config(result)
def is_valid_get_device_by_id(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_device_by_id(runner, cli, auth_options):
    """Run `device-onboarding-pnp get-device-by-id`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-device-by-id',
                                 """--id='string'"""])
    assert not result.exception
    assert is_valid_get_device_by_id(result)
def is_valid_claim_device(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_claim_device(runner, cli, auth_options):
    """Run `device-onboarding-pnp claim-device`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'claim-device',
                                 """--active_validation=True""",
                                 """--configfileurl='string'""",
                                 """--configid='string'""",
                                 """--deviceclaimlist='{"configList": [{"configId": "string", "configParameters": [{"key": "string", "value": "string"}]}], "deviceId": "string", "licenseLevel": "string", "licenseType": "string", "topOfStackSerialNumber": "string"}'""",
                                 """--fileserviceid='string'""",
                                 """--imageid='string'""",
                                 """--imageurl='string'""",
                                 """--payload=None""",
                                 """--populateinventory=True""",
                                 """--projectid='string'""",
                                 """--workflowid='string'"""])
    assert not result.exception
    assert is_valid_claim_device(result)
def is_valid_delete_device_by_id_from_pnp(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_delete_device_by_id_from_pnp(runner, cli, auth_options):
    """Run `device-onboarding-pnp delete-device-by-id-from-pnp`; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'delete-device-by-id-from-pnp',
                                 """--id='string'"""])
    assert not result.exception
    assert is_valid_delete_device_by_id_from_pnp(result)
def is_valid_get_device_list(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_device_list(runner, cli, auth_options):
    """Run `device-onboarding-pnp get-device-list` with every filter option; expect a clean exit with output."""
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-device-list',
                                 """--cm_state='value1,value2'""",
                                 """--last_contact=True""",
                                 """--limit=0""",
                                 """--name='value1,value2'""",
                                 """--offset=0""",
                                 """--onb_state='value1,value2'""",
                                 """--pid='value1,value2'""",
                                 """--project_id='value1,value2'""",
                                 """--project_name='value1,value2'""",
                                 """--serial_number='value1,value2'""",
                                 """--smart_account_id='value1,value2'""",
                                 """--sort='value1,value2'""",
                                 """--sort_order='string'""",
                                 """--source='value1,value2'""",
                                 """--state='value1,value2'""",
                                 """--virtual_account_id='value1,value2'""",
                                 """--workflow_id='value1,value2'""",
                                 """--workflow_name='value1,value2'"""])
    assert not result.exception
    assert is_valid_get_device_list(result)
def is_valid_add_device(result):
    """Return True when the CLI invocation produced non-whitespace output."""
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_add_device(runner, cli, auth_options):
    """Smoke-test the 'add-device' CLI command.

    Supplies placeholder JSON payloads for every complex option
    (device info, run summaries, workflows) and asserts the command
    completes without an exception and emits output.

    NOTE(review): the --deviceinfo triple-quoted string intentionally
    spans two physical lines, so its runtime value embeds a newline —
    preserved here byte-for-byte; confirm against the command's parser.
    """
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'add-device',
                                 """--_id='string'""",
                                 """--active_validation=True""",
                                 """--deviceinfo='{"aaaCredentials": {"password": "string", "username": "string"}, "addedOn": 0, "addnMacAddrs": ["string"], "agentType": "POSIX", "authStatus": "string", "authenticatedSudiSerialNo": "string", "capabilitiesSupported": ["string"], "cmState": "NotContacted", "description": "string", "deviceSudiSerialNos": ["string"], "deviceType": "string", "featuresSupported": ["string"], "fileSystemList": [{"freespace": 0, "name": "string", "readable": true, "size": 0, "type": "string", "writeable": true}], "firstContact": 0, "hostname": "string", "httpHeaders": [{"key": "string", "value": "string"}], "imageFile": "string", "imageVersion": "string", "ipInterfaces": [{"ipv4Address": {}, "ipv6AddressList": [{}], "macAddress": "string", "name": "string", "status": "string"}], "lastContact": 0, "lastSyncTime": 0, "lastUpdateOn": 0, "location": {"address": "string", "altitude": "string", "latitude": "string", "longitude": "string", "siteId": "string"}, "macAddress": "string", "mode": "string", "name": "string", "neighborLinks": [{"localInterfaceName": "string", "localMacAddress": "string", "localShortInterfaceName": "string", "remoteDeviceName": "string", "remoteInterfaceName": "string", "remoteMacAddress": "string", "remotePlatform": "string", "remoteShortInterfaceName": "string", "remoteVersion": "string"}], "onbState": "NotContacted", "pid": "string", "pnpProfileList": [{"createdBy": "string", "discoveryCreated": true, "primaryEndpoint": {"certificate": "string", "fqdn": "string", "ipv4Address": {}, "ipv6Address": {}, "port": 0, "protocol": "string"}, "profileName": "string", "secondaryEndpoint": {"certificate": "string", "fqdn": "string", "ipv4Address": {}, "ipv6Address": {}, "port": 0, "protocol": "string"}}], "populateInventory": true, "preWorkflowCliOuputs": [{"cli": "string", "cliOutput": "string"}], "projectId": "string", "projectName": "string", "reloadRequested": true, "serialNumber": "string", "smartAccountId": "string", "source": "string", "stack": true, 
"stackInfo": {"isFullRing": true, "stackMemberList": [{"hardwareVersion": "string", "licenseLevel": "string", "licenseType": "string", "macAddress": "string", "pid": "string", "priority": 0, "role": "string", "serialNumber": "string", "softwareVersion": "string", "stackNumber": 0, "state": "string", "sudiSerialNumber": "string"}], "stackRingProtocol": "string", "supportsStackWorkflows": true, "totalMemberCount": 0, "validLicenseLevels": ["string"]}, "state": "Unclaimed", "sudiRequired": true, "tags": {}, "userSudiSerialNos": ["string"], "virtualAccountId": "string", "workflowId": "string", "workflowName": "string"}'""",
                                 """--payload=None""",
                                 """--runsummarylist='{"details": "string", "errorFlag": true, "historyTaskInfo": {"addnDetails": [{"key": "string", "value": "string"}], "name": "string", "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}, "timestamp": 0}'""",
                                 """--systemresetworkflow='{"_id": "string", "addToInventory": true, "addedOn": 0, "configId": "string", "currTaskIdx": 0, "description": "string", "endTime": 0, "execTime": 0, "imageId": "string", "instanceType": "SystemWorkflow", "lastupdateOn": 0, "name": "string", "startTime": 0, "state": "string", "tasks": [{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}], "tenantId": "string", "type": "string", "useState": "string", "version": 0}'""",
                                 """--systemworkflow='{"_id": "string", "addToInventory": true, "addedOn": 0, "configId": "string", "currTaskIdx": 0, "description": "string", "endTime": 0, "execTime": 0, "imageId": "string", "instanceType": "SystemWorkflow", "lastupdateOn": 0, "name": "string", "startTime": 0, "state": "string", "tasks": [{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}], "tenantId": "string", "type": "string", "useState": "string", "version": 0}'""",
                                 """--tenantid='string'""",
                                 """--version=0""",
                                 """--workflow='{"_id": "string", "addToInventory": true, "addedOn": 0, "configId": "string", "currTaskIdx": 0, "description": "string", "endTime": 0, "execTime": 0, "imageId": "string", "instanceType": "SystemWorkflow", "lastupdateOn": 0, "name": "string", "startTime": 0, "state": "string", "tasks": [{"currWorkItemIdx": 0, "endTime": 0, "name": "string", "startTime": 0, "state": "string", "taskSeqNo": 0, "timeTaken": 0, "type": "string", "workItemList": [{"command": "string", "endTime": 0, "outputStr": "string", "startTime": 0, "state": "string", "timeTaken": 0}]}], "tenantId": "string", "type": "string", "useState": "string", "version": 0}'""",
                                 """--workflowparameters='{"configList": [{"configId": "string", "configParameters": [{"key": "string", "value": "string"}]}], "licenseLevel": "string", "licenseType": "string", "topOfStackSerialNumber": "string"}'"""])
    assert not result.exception
    assert is_valid_add_device(result)
def is_valid_get_device_count(result):
    """Return True if the CLI invocation printed any non-blank output.

    Args:
        result: an invocation result object exposing an ``output`` string.

    Returns:
        bool: True when ``result.output`` contains non-whitespace text.
    """
    # `bool(...)` replaces the redundant `True if data else False` ternary.
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_device_count(runner, cli, auth_options):
    """Smoke-test the 'get-device-count' CLI command.

    Passes one placeholder value for every supported filter option and
    asserts the command completes without an exception and emits output.
    """
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-device-count',
                                 """--cm_state='value1,value2'""",
                                 """--last_contact=True""",
                                 """--name='value1,value2'""",
                                 """--onb_state='value1,value2'""",
                                 """--pid='value1,value2'""",
                                 """--project_id='value1,value2'""",
                                 """--project_name='value1,value2'""",
                                 """--serial_number='value1,value2'""",
                                 """--smart_account_id='value1,value2'""",
                                 """--source='value1,value2'""",
                                 """--state='value1,value2'""",
                                 """--virtual_account_id='value1,value2'""",
                                 """--workflow_id='value1,value2'""",
                                 """--workflow_name='value1,value2'"""])
    assert not result.exception
    assert is_valid_get_device_count(result)
def is_valid_get_device_history(result):
    """Return True if the CLI invocation printed any non-blank output.

    Args:
        result: an invocation result object exposing an ``output`` string.

    Returns:
        bool: True when ``result.output`` contains non-whitespace text.
    """
    # `bool(...)` replaces the redundant `True if data else False` ternary.
    return bool(result.output.strip())
@pytest.mark.device_onboarding_pnp
def test_get_device_history(runner, cli, auth_options):
    """Smoke-test the 'get-device-history' CLI command.

    Queries history for a placeholder serial number with sort options
    and asserts the command completes without an exception and emits
    output.
    """
    result = runner.invoke(cli, ['-v', '1.3.3', *auth_options,
                                 'device-onboarding-pnp', 'get-device-history',
                                 """--serial_number='string'""",
                                 """--sort='value1,value2'""",
                                 """--sort_order='string'"""])
    assert not result.exception
    assert is_valid_get_device_history(result)
| [
"jbogarin@altus.cr"
] | jbogarin@altus.cr |
6ac04cb5b96f0a40f16345db3711813dc5793109 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/isis_state/interface_detail/isis_intf/l2_circ_metrics/__init__.py | 80633925b236960c4d49636892f40d3900cec3c5 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,594 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class l2_circ_metrics(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-isis-operational - based on the path /isis-state/interface-detail/isis-intf/l2-circ-metrics. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__auth_check','__auth_mode','__auth_key','__circ_metric','__ip6_circ_metric','__circ_priority','__hello_int','__hello_mult','__dis','__dis_ch','__next_hello','__active_adj',)
_yang_name = 'l2-circ-metrics'
_rest_name = 'l2-circ-metrics'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__hello_int = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-int", rest_name="hello-int", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__ip6_circ_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip6-circ-metric", rest_name="ip6-circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__auth_key = YANGDynClass(base=unicode, is_leaf=True, yang_name="auth-key", rest_name="auth-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
self.__circ_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-metric", rest_name="circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__dis_ch = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dis-ch", rest_name="dis-ch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__active_adj = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="active-adj", rest_name="active-adj", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__auth_mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 0}, u'cleartext': {'value': 1}, u'md5': {'value': 2}},), is_leaf=True, yang_name="auth-mode", rest_name="auth-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='auth-mode', is_config=False)
self.__circ_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="circ-priority", rest_name="circ-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint8', is_config=False)
self.__dis = YANGDynClass(base=unicode, is_leaf=True, yang_name="dis", rest_name="dis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
self.__next_hello = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hello", rest_name="next-hello", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__auth_check = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="auth-check", rest_name="auth-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
self.__hello_mult = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-mult", rest_name="hello-mult", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'isis-state', u'interface-detail', u'isis-intf', u'l2-circ-metrics']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'isis-state', u'interface-detail', u'isis-intf', u'l2-circ-metrics']
def _get_auth_check(self):
"""
Getter method for auth_check, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/auth_check (isis-status)
YANG Description: If authentication enabled on incoming IS-IS PDUs
"""
return self.__auth_check
def _set_auth_check(self, v, load=False):
"""
Setter method for auth_check, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/auth_check (isis-status)
If this variable is read-only (config: false) in the
source YANG file, then _set_auth_check is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_auth_check() directly.
YANG Description: If authentication enabled on incoming IS-IS PDUs
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="auth-check", rest_name="auth-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """auth_check must be of a type compatible with isis-status""",
'defined-type': "brocade-isis-operational:isis-status",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="auth-check", rest_name="auth-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)""",
})
self.__auth_check = t
if hasattr(self, '_set'):
self._set()
def _unset_auth_check(self):
self.__auth_check = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="auth-check", rest_name="auth-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
def _get_auth_mode(self):
"""
Getter method for auth_mode, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/auth_mode (auth-mode)
YANG Description: IS-IS authentication mode
"""
return self.__auth_mode
def _set_auth_mode(self, v, load=False):
"""
Setter method for auth_mode, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/auth_mode (auth-mode)
If this variable is read-only (config: false) in the
source YANG file, then _set_auth_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_auth_mode() directly.
YANG Description: IS-IS authentication mode
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 0}, u'cleartext': {'value': 1}, u'md5': {'value': 2}},), is_leaf=True, yang_name="auth-mode", rest_name="auth-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='auth-mode', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """auth_mode must be of a type compatible with auth-mode""",
'defined-type': "brocade-isis-operational:auth-mode",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 0}, u'cleartext': {'value': 1}, u'md5': {'value': 2}},), is_leaf=True, yang_name="auth-mode", rest_name="auth-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='auth-mode', is_config=False)""",
})
self.__auth_mode = t
if hasattr(self, '_set'):
self._set()
def _unset_auth_mode(self):
self.__auth_mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 0}, u'cleartext': {'value': 1}, u'md5': {'value': 2}},), is_leaf=True, yang_name="auth-mode", rest_name="auth-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='auth-mode', is_config=False)
def _get_auth_key(self):
"""
Getter method for auth_key, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/auth_key (string)
YANG Description: IS-IS authentication key
"""
return self.__auth_key
def _set_auth_key(self, v, load=False):
"""
Setter method for auth_key, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/auth_key (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_auth_key is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_auth_key() directly.
YANG Description: IS-IS authentication key
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="auth-key", rest_name="auth-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """auth_key must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="auth-key", rest_name="auth-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)""",
})
self.__auth_key = t
if hasattr(self, '_set'):
self._set()
def _unset_auth_key(self):
self.__auth_key = YANGDynClass(base=unicode, is_leaf=True, yang_name="auth-key", rest_name="auth-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
def _get_circ_metric(self):
"""
Getter method for circ_metric, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/circ_metric (uint32)
YANG Description: ISIS circuit Metric
"""
return self.__circ_metric
def _set_circ_metric(self, v, load=False):
"""
Setter method for circ_metric, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/circ_metric (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_circ_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_circ_metric() directly.
YANG Description: ISIS circuit Metric
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-metric", rest_name="circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """circ_metric must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-metric", rest_name="circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__circ_metric = t
if hasattr(self, '_set'):
self._set()
def _unset_circ_metric(self):
self.__circ_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-metric", rest_name="circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_ip6_circ_metric(self):
"""
Getter method for ip6_circ_metric, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/ip6_circ_metric (uint32)
YANG Description: ISISv6 circuit Metric
"""
return self.__ip6_circ_metric
def _set_ip6_circ_metric(self, v, load=False):
"""
Setter method for ip6_circ_metric, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/ip6_circ_metric (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip6_circ_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip6_circ_metric() directly.
YANG Description: ISISv6 circuit Metric
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip6-circ-metric", rest_name="ip6-circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip6_circ_metric must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip6-circ-metric", rest_name="ip6-circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__ip6_circ_metric = t
if hasattr(self, '_set'):
self._set()
def _unset_ip6_circ_metric(self):
self.__ip6_circ_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip6-circ-metric", rest_name="ip6-circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_circ_priority(self):
"""
Getter method for circ_priority, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/circ_priority (uint8)
YANG Description: Circuit Priority
"""
return self.__circ_priority
def _set_circ_priority(self, v, load=False):
"""
Setter method for circ_priority, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/circ_priority (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_circ_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_circ_priority() directly.
YANG Description: Circuit Priority
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="circ-priority", rest_name="circ-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint8', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """circ_priority must be of a type compatible with uint8""",
'defined-type': "uint8",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="circ-priority", rest_name="circ-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint8', is_config=False)""",
})
self.__circ_priority = t
if hasattr(self, '_set'):
self._set()
def _unset_circ_priority(self):
self.__circ_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="circ-priority", rest_name="circ-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint8', is_config=False)
def _get_hello_int(self):
"""
Getter method for hello_int, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/hello_int (uint32)
YANG Description: Hello interval
"""
return self.__hello_int
def _set_hello_int(self, v, load=False):
"""
Setter method for hello_int, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/hello_int (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_hello_int is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hello_int() directly.
YANG Description: Hello interval
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-int", rest_name="hello-int", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hello_int must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-int", rest_name="hello-int", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__hello_int = t
if hasattr(self, '_set'):
self._set()
def _unset_hello_int(self):
self.__hello_int = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-int", rest_name="hello-int", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_hello_mult(self):
"""
Getter method for hello_mult, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/hello_mult (uint32)
YANG Description: Hello multiplier
"""
return self.__hello_mult
def _set_hello_mult(self, v, load=False):
"""
Setter method for hello_mult, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/hello_mult (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_hello_mult is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hello_mult() directly.
YANG Description: Hello multiplier
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-mult", rest_name="hello-mult", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hello_mult must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-mult", rest_name="hello-mult", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__hello_mult = t
if hasattr(self, '_set'):
self._set()
def _unset_hello_mult(self):
self.__hello_mult = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-mult", rest_name="hello-mult", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_dis(self):
"""
Getter method for dis, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/dis (string)
YANG Description: Designated IS
"""
return self.__dis
def _set_dis(self, v, load=False):
"""
Setter method for dis, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/dis (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_dis is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dis() directly.
YANG Description: Designated IS
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="dis", rest_name="dis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dis must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="dis", rest_name="dis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)""",
})
self.__dis = t
if hasattr(self, '_set'):
self._set()
def _unset_dis(self):
self.__dis = YANGDynClass(base=unicode, is_leaf=True, yang_name="dis", rest_name="dis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
def _get_dis_ch(self):
"""
Getter method for dis_ch, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/dis_ch (uint32)
YANG Description: DIS changes
"""
return self.__dis_ch
def _set_dis_ch(self, v, load=False):
"""
Setter method for dis_ch, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/dis_ch (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_dis_ch is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dis_ch() directly.
YANG Description: DIS changes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dis-ch", rest_name="dis-ch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dis_ch must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dis-ch", rest_name="dis-ch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__dis_ch = t
if hasattr(self, '_set'):
self._set()
def _unset_dis_ch(self):
self.__dis_ch = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dis-ch", rest_name="dis-ch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_next_hello(self):
"""
Getter method for next_hello, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/next_hello (uint32)
YANG Description: Time remaining until next hello
"""
return self.__next_hello
def _set_next_hello(self, v, load=False):
"""
Setter method for next_hello, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/next_hello (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hello is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hello() directly.
YANG Description: Time remaining until next hello
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hello", rest_name="next-hello", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_hello must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hello", rest_name="next-hello", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__next_hello = t
if hasattr(self, '_set'):
self._set()
def _unset_next_hello(self):
self.__next_hello = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hello", rest_name="next-hello", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_active_adj(self):
"""
Getter method for active_adj, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/active_adj (uint32)
YANG Description: Number of active adjacencies
"""
return self.__active_adj
def _set_active_adj(self, v, load=False):
"""
Setter method for active_adj, mapped from YANG variable /isis_state/interface_detail/isis_intf/l2_circ_metrics/active_adj (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_active_adj is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_active_adj() directly.
YANG Description: Number of active adjacencies
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="active-adj", rest_name="active-adj", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """active_adj must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="active-adj", rest_name="active-adj", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__active_adj = t
if hasattr(self, '_set'):
self._set()
def _unset_active_adj(self):
self.__active_adj = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="active-adj", rest_name="active-adj", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
auth_check = __builtin__.property(_get_auth_check)
auth_mode = __builtin__.property(_get_auth_mode)
auth_key = __builtin__.property(_get_auth_key)
circ_metric = __builtin__.property(_get_circ_metric)
ip6_circ_metric = __builtin__.property(_get_ip6_circ_metric)
circ_priority = __builtin__.property(_get_circ_priority)
hello_int = __builtin__.property(_get_hello_int)
hello_mult = __builtin__.property(_get_hello_mult)
dis = __builtin__.property(_get_dis)
dis_ch = __builtin__.property(_get_dis_ch)
next_hello = __builtin__.property(_get_next_hello)
active_adj = __builtin__.property(_get_active_adj)
_pyangbind_elements = {'auth_check': auth_check, 'auth_mode': auth_mode, 'auth_key': auth_key, 'circ_metric': circ_metric, 'ip6_circ_metric': ip6_circ_metric, 'circ_priority': circ_priority, 'hello_int': hello_int, 'hello_mult': hello_mult, 'dis': dis, 'dis_ch': dis_ch, 'next_hello': next_hello, 'active_adj': active_adj, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
9d4286f51f796ee07f9c9689d5e16a93c185dad9 | 2b398353f5b0529ac666ef180e9dc966474a70c0 | /vspk/v5_0/nuvm.py | cf6e82657c84100dc2dc0640b6f91968bc58d512 | [
"BSD-3-Clause"
] | permissive | nuagenetworks/vspk-python | e0c4570be81da2a4d8946299cb44eaf9559e0170 | 9a44d3015aa6424d0154c8c8a42297669cce11f9 | refs/heads/master | 2023-06-01T01:12:47.011489 | 2023-05-12T19:48:52 | 2023-05-12T19:48:52 | 53,171,411 | 21 | 18 | BSD-3-Clause | 2020-12-16T12:36:58 | 2016-03-04T23:10:58 | Python | UTF-8 | Python | false | false | 27,147 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUVMResyncsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUAlarmsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUVMInterfacesFetcher
from .fetchers import NUVRSsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUVM(NURESTObject):
""" Represents a VM in the VSD
Notes:
API that can retrieve the VMs associated with a domain, zone or subnet for mediation created VM's for REST created VM's you need to set the additional proxy user header in http request : X-Nuage-ProxyUservalue of the header has to be either :1) enterpriseName@UserName (example : Alcatel Lucent@bob), or 2) external ID of user in VSD, typically is UUID generally decided by the CMS tool in questionUser needs to have CMS privileges to use proxy user header.
"""
__rest_name__ = "vm"
__resource_name__ = "vms"
## Constants
CONST_REASON_TYPE_SHUTDOWN_UNKNOWN = "SHUTDOWN_UNKNOWN"
CONST_REASON_TYPE_CRASHED_UNKNOWN = "CRASHED_UNKNOWN"
CONST_REASON_TYPE_PAUSED_IOERROR = "PAUSED_IOERROR"
CONST_STATUS_SHUTDOWN = "SHUTDOWN"
CONST_REASON_TYPE_SHUTDOWN_LAST = "SHUTDOWN_LAST"
CONST_STATUS_DELETE_PENDING = "DELETE_PENDING"
CONST_REASON_TYPE_RUNNING_UNKNOWN = "RUNNING_UNKNOWN"
CONST_STATUS_RUNNING = "RUNNING"
CONST_REASON_TYPE_RUNNING_LAST = "RUNNING_LAST"
CONST_REASON_TYPE_RUNNING_UNPAUSED = "RUNNING_UNPAUSED"
CONST_REASON_TYPE_PAUSED_FROM_SNAPSHOT = "PAUSED_FROM_SNAPSHOT"
CONST_REASON_TYPE_PAUSED_MIGRATION = "PAUSED_MIGRATION"
CONST_REASON_TYPE_RUNNING_BOOTED = "RUNNING_BOOTED"
CONST_REASON_TYPE_UNKNOWN = "UNKNOWN"
CONST_STATUS_UNREACHABLE = "UNREACHABLE"
CONST_STATUS_BLOCKED = "BLOCKED"
CONST_REASON_TYPE_SHUTOFF_DESTROYED = "SHUTOFF_DESTROYED"
CONST_REASON_TYPE_SHUTOFF_FROM_SNAPSHOT = "SHUTOFF_FROM_SNAPSHOT"
CONST_REASON_TYPE_SHUTOFF_UNKNOWN = "SHUTOFF_UNKNOWN"
CONST_STATUS_NOSTATE = "NOSTATE"
CONST_REASON_TYPE_PAUSED_DUMP = "PAUSED_DUMP"
CONST_REASON_TYPE_CRASHED_LAST = "CRASHED_LAST"
CONST_STATUS_CRASHED = "CRASHED"
CONST_REASON_TYPE_PAUSED_LAST = "PAUSED_LAST"
CONST_REASON_TYPE_BLOCKED_LAST = "BLOCKED_LAST"
CONST_REASON_TYPE_SHUTOFF_LAST = "SHUTOFF_LAST"
CONST_STATUS_SHUTOFF = "SHUTOFF"
CONST_REASON_TYPE_SHUTOFF_SHUTDOWN = "SHUTOFF_SHUTDOWN"
CONST_REASON_TYPE_NOSTATE_UNKNOWN = "NOSTATE_UNKNOWN"
CONST_REASON_TYPE_PAUSED_SAVE = "PAUSED_SAVE"
CONST_REASON_TYPE_RUNNING_FROM_SNAPSHOT = "RUNNING_FROM_SNAPSHOT"
CONST_STATUS_UNKNOWN = "UNKNOWN"
CONST_REASON_TYPE_PAUSED_UNKNOWN = "PAUSED_UNKNOWN"
CONST_REASON_TYPE_SHUTOFF_FAILED = "SHUTOFF_FAILED"
CONST_REASON_TYPE_SHUTOFF_SAVED = "SHUTOFF_SAVED"
CONST_REASON_TYPE_SHUTOFF_MIGRATED = "SHUTOFF_MIGRATED"
CONST_STATUS_LAST = "LAST"
CONST_REASON_TYPE_RUNNING_MIGRATED = "RUNNING_MIGRATED"
CONST_REASON_TYPE_RUNNING_SAVE_CANCELED = "RUNNING_SAVE_CANCELED"
CONST_REASON_TYPE_SHUTDOWN_USER = "SHUTDOWN_USER"
CONST_REASON_TYPE_RUNNING_MIGRATION_CANCELED = "RUNNING_MIGRATION_CANCELED"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_STATUS_PAUSED = "PAUSED"
CONST_STATUS_INIT = "INIT"
CONST_REASON_TYPE_BLOCKED_UNKNOWN = "BLOCKED_UNKNOWN"
CONST_REASON_TYPE_NOSTATE_LAST = "NOSTATE_LAST"
CONST_REASON_TYPE_RUNNING_RESTORED = "RUNNING_RESTORED"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_REASON_TYPE_SHUTOFF_CRASHED = "SHUTOFF_CRASHED"
CONST_REASON_TYPE_PAUSED_USER = "PAUSED_USER"
CONST_DELETE_MODE_TIMER = "TIMER"
CONST_REASON_TYPE_PAUSED_WATCHDOG = "PAUSED_WATCHDOG"
CONST_REASON_TYPE_PAUSED_SHUTTING_DOWN = "PAUSED_SHUTTING_DOWN"
def __init__(self, **kwargs):
""" Initializes a VM instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> vm = NUVM(id=u'xxxx-xxx-xxx-xxx', name=u'VM')
>>> vm = NUVM(data=my_dict)
"""
super(NUVM, self).__init__()
# Read/Write Attributes
self._l2_domain_ids = None
self._vrsid = None
self._uuid = None
self._name = None
self._last_updated_by = None
self._reason_type = None
self._delete_expiry = None
self._delete_mode = None
self._resync_info = None
self._site_identifier = None
self._interfaces = None
self._enterprise_id = None
self._enterprise_name = None
self._entity_scope = None
self._domain_ids = None
self._compute_provisioned = None
self._zone_ids = None
self._orchestration_id = None
self._user_id = None
self._user_name = None
self._status = None
self._subnet_ids = None
self._external_id = None
self._hypervisor_ip = None
self.expose_attribute(local_name="l2_domain_ids", remote_name="l2DomainIDs", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="vrsid", remote_name="VRSID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="reason_type", remote_name="reasonType", attribute_type=str, is_required=False, is_unique=False, choices=[u'BLOCKED_LAST', u'BLOCKED_UNKNOWN', u'CRASHED_LAST', u'CRASHED_UNKNOWN', u'NOSTATE_LAST', u'NOSTATE_UNKNOWN', u'PAUSED_DUMP', u'PAUSED_FROM_SNAPSHOT', u'PAUSED_IOERROR', u'PAUSED_LAST', u'PAUSED_MIGRATION', u'PAUSED_SAVE', u'PAUSED_SHUTTING_DOWN', u'PAUSED_UNKNOWN', u'PAUSED_USER', u'PAUSED_WATCHDOG', u'RUNNING_BOOTED', u'RUNNING_FROM_SNAPSHOT', u'RUNNING_LAST', u'RUNNING_MIGRATED', u'RUNNING_MIGRATION_CANCELED', u'RUNNING_RESTORED', u'RUNNING_SAVE_CANCELED', u'RUNNING_UNKNOWN', u'RUNNING_UNPAUSED', u'SHUTDOWN_LAST', u'SHUTDOWN_UNKNOWN', u'SHUTDOWN_USER', u'SHUTOFF_CRASHED', u'SHUTOFF_DESTROYED', u'SHUTOFF_FAILED', u'SHUTOFF_FROM_SNAPSHOT', u'SHUTOFF_LAST', u'SHUTOFF_MIGRATED', u'SHUTOFF_SAVED', u'SHUTOFF_SHUTDOWN', u'SHUTOFF_UNKNOWN', u'UNKNOWN'])
self.expose_attribute(local_name="delete_expiry", remote_name="deleteExpiry", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="delete_mode", remote_name="deleteMode", attribute_type=str, is_required=False, is_unique=False, choices=[u'TIMER'])
self.expose_attribute(local_name="resync_info", remote_name="resyncInfo", attribute_type=dict, is_required=False, is_unique=False)
self.expose_attribute(local_name="site_identifier", remote_name="siteIdentifier", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="interfaces", remote_name="interfaces", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_name", remote_name="enterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="domain_ids", remote_name="domainIDs", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="compute_provisioned", remote_name="computeProvisioned", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="zone_ids", remote_name="zoneIDs", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="orchestration_id", remote_name="orchestrationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="user_id", remote_name="userID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="user_name", remote_name="userName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'BLOCKED', u'CRASHED', u'DELETE_PENDING', u'INIT', u'LAST', u'NOSTATE', u'PAUSED', u'RUNNING', u'SHUTDOWN', u'SHUTOFF', u'UNKNOWN', u'UNREACHABLE'])
self.expose_attribute(local_name="subnet_ids", remote_name="subnetIDs", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="hypervisor_ip", remote_name="hypervisorIP", attribute_type=str, is_required=False, is_unique=False)
# Fetchers
self.vm_resyncs = NUVMResyncsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.alarms = NUAlarmsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vm_interfaces = NUVMInterfacesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vrss = NUVRSsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def l2_domain_ids(self):
""" Get l2_domain_ids value.
Notes:
Array of IDs of the l2 domain that the VM is connected to
This attribute is named `l2DomainIDs` in VSD API.
"""
return self._l2_domain_ids
@l2_domain_ids.setter
def l2_domain_ids(self, value):
""" Set l2_domain_ids value.
Notes:
Array of IDs of the l2 domain that the VM is connected to
This attribute is named `l2DomainIDs` in VSD API.
"""
self._l2_domain_ids = value
@property
def vrsid(self):
""" Get vrsid value.
Notes:
Id of the VRS that this VM is attached to.
This attribute is named `VRSID` in VSD API.
"""
return self._vrsid
@vrsid.setter
def vrsid(self, value):
""" Set vrsid value.
Notes:
Id of the VRS that this VM is attached to.
This attribute is named `VRSID` in VSD API.
"""
self._vrsid = value
@property
def uuid(self):
""" Get uuid value.
Notes:
UUID of the VM
This attribute is named `UUID` in VSD API.
"""
return self._uuid
@uuid.setter
def uuid(self, value):
""" Set uuid value.
Notes:
UUID of the VM
This attribute is named `UUID` in VSD API.
"""
self._uuid = value
@property
def name(self):
""" Get name value.
Notes:
Name of the VM
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the VM
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def reason_type(self):
""" Get reason_type value.
Notes:
Reason of the event associated with the VM.
This attribute is named `reasonType` in VSD API.
"""
return self._reason_type
@reason_type.setter
def reason_type(self, value):
""" Set reason_type value.
Notes:
Reason of the event associated with the VM.
This attribute is named `reasonType` in VSD API.
"""
self._reason_type = value
@property
def delete_expiry(self):
""" Get delete_expiry value.
Notes:
reflects the VM Deletion expiry timer in secs , deleteMode needs to be non-null value for deleteExpiry to be taken in to effect. CMS created VM's will always have deleteMode set to TIMER
This attribute is named `deleteExpiry` in VSD API.
"""
return self._delete_expiry
@delete_expiry.setter
def delete_expiry(self, value):
""" Set delete_expiry value.
Notes:
reflects the VM Deletion expiry timer in secs , deleteMode needs to be non-null value for deleteExpiry to be taken in to effect. CMS created VM's will always have deleteMode set to TIMER
This attribute is named `deleteExpiry` in VSD API.
"""
self._delete_expiry = value
@property
def delete_mode(self):
""" Get delete_mode value.
Notes:
reflects the mode of VM Deletion - TIMER Possible values are TIMER, .
This attribute is named `deleteMode` in VSD API.
"""
return self._delete_mode
@delete_mode.setter
def delete_mode(self, value):
""" Set delete_mode value.
Notes:
reflects the mode of VM Deletion - TIMER Possible values are TIMER, .
This attribute is named `deleteMode` in VSD API.
"""
self._delete_mode = value
@property
def resync_info(self):
""" Get resync_info value.
Notes:
Information of the status of the resync operation of a VM
This attribute is named `resyncInfo` in VSD API.
"""
return self._resync_info
@resync_info.setter
def resync_info(self, value):
""" Set resync_info value.
Notes:
Information of the status of the resync operation of a VM
This attribute is named `resyncInfo` in VSD API.
"""
self._resync_info = value
@property
def site_identifier(self):
""" Get site_identifier value.
Notes:
This property specifies the site the VM belongs to, for Geo-redundancy.
This attribute is named `siteIdentifier` in VSD API.
"""
return self._site_identifier
@site_identifier.setter
def site_identifier(self, value):
""" Set site_identifier value.
Notes:
This property specifies the site the VM belongs to, for Geo-redundancy.
This attribute is named `siteIdentifier` in VSD API.
"""
self._site_identifier = value
@property
def interfaces(self):
""" Get interfaces value.
Notes:
List of VM interfaces associated with the VM
"""
return self._interfaces
@interfaces.setter
def interfaces(self, value):
""" Set interfaces value.
Notes:
List of VM interfaces associated with the VM
"""
self._interfaces = value
@property
def enterprise_id(self):
""" Get enterprise_id value.
Notes:
ID of the enterprise that this VM belongs to
This attribute is named `enterpriseID` in VSD API.
"""
return self._enterprise_id
@enterprise_id.setter
def enterprise_id(self, value):
""" Set enterprise_id value.
Notes:
ID of the enterprise that this VM belongs to
This attribute is named `enterpriseID` in VSD API.
"""
self._enterprise_id = value
@property
def enterprise_name(self):
""" Get enterprise_name value.
Notes:
Name of the enterprise that this VM belongs to
This attribute is named `enterpriseName` in VSD API.
"""
return self._enterprise_name
@enterprise_name.setter
def enterprise_name(self, value):
""" Set enterprise_name value.
Notes:
Name of the enterprise that this VM belongs to
This attribute is named `enterpriseName` in VSD API.
"""
self._enterprise_name = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def domain_ids(self):
""" Get domain_ids value.
Notes:
Array of IDs of the domain that the VM is connected to
This attribute is named `domainIDs` in VSD API.
"""
return self._domain_ids
@domain_ids.setter
def domain_ids(self, value):
""" Set domain_ids value.
Notes:
Array of IDs of the domain that the VM is connected to
This attribute is named `domainIDs` in VSD API.
"""
self._domain_ids = value
@property
def compute_provisioned(self):
""" Get compute_provisioned value.
Notes:
computeProvisioned
This attribute is named `computeProvisioned` in VSD API.
"""
return self._compute_provisioned
@compute_provisioned.setter
def compute_provisioned(self, value):
""" Set compute_provisioned value.
Notes:
computeProvisioned
This attribute is named `computeProvisioned` in VSD API.
"""
self._compute_provisioned = value
@property
def zone_ids(self):
""" Get zone_ids value.
Notes:
Array of IDs of the zone that this VM is attached to
This attribute is named `zoneIDs` in VSD API.
"""
return self._zone_ids
@zone_ids.setter
def zone_ids(self, value):
""" Set zone_ids value.
Notes:
Array of IDs of the zone that this VM is attached to
This attribute is named `zoneIDs` in VSD API.
"""
self._zone_ids = value
@property
def orchestration_id(self):
""" Get orchestration_id value.
Notes:
Orchestration ID
This attribute is named `orchestrationID` in VSD API.
"""
return self._orchestration_id
@orchestration_id.setter
def orchestration_id(self, value):
""" Set orchestration_id value.
Notes:
Orchestration ID
This attribute is named `orchestrationID` in VSD API.
"""
self._orchestration_id = value
@property
def user_id(self):
""" Get user_id value.
Notes:
ID of the user that created this VM
This attribute is named `userID` in VSD API.
"""
return self._user_id
@user_id.setter
def user_id(self, value):
""" Set user_id value.
Notes:
ID of the user that created this VM
This attribute is named `userID` in VSD API.
"""
self._user_id = value
@property
def user_name(self):
""" Get user_name value.
Notes:
Username of the user that created this VM
This attribute is named `userName` in VSD API.
"""
return self._user_name
@user_name.setter
def user_name(self, value):
""" Set user_name value.
Notes:
Username of the user that created this VM
This attribute is named `userName` in VSD API.
"""
self._user_name = value
@property
def status(self):
""" Get status value.
Notes:
Status of the VM.
"""
return self._status
@status.setter
def status(self, value):
""" Set status value.
Notes:
Status of the VM.
"""
self._status = value
@property
def subnet_ids(self):
""" Get subnet_ids value.
Notes:
Array of IDs of the subnets that the VM is connected to
This attribute is named `subnetIDs` in VSD API.
"""
return self._subnet_ids
@subnet_ids.setter
def subnet_ids(self, value):
""" Set subnet_ids value.
Notes:
Array of IDs of the subnets that the VM is connected to
This attribute is named `subnetIDs` in VSD API.
"""
self._subnet_ids = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def hypervisor_ip(self):
    """IP address of the hypervisor this VM is currently running in.

    Exposed as `hypervisorIP` in the VSD API.
    """
    return self._hypervisor_ip
@hypervisor_ip.setter
def hypervisor_ip(self, value):
    """Store the IP address of the hypervisor running this VM.

    Exposed as `hypervisorIP` in the VSD API.
    """
    self._hypervisor_ip = value
| [
"corentin.henry@nokia.com"
] | corentin.henry@nokia.com |
dc22848f4ebe315f336530a4cd8573b1d181df82 | 39b9ae78f0bfb17fbdc8bbfa604f856e753154d3 | /src/aac_datasets/utils/collate.py | f7f3a8939d50e9fff207e1268e054f28361adb27 | [
"MIT"
] | permissive | Labbeti/aac-datasets | 9de00690dcf95fcdc39ec4c2f82b598576fa4bbb | 52133540542c3b1b1fba9546e795a96622979056 | refs/heads/main | 2023-08-31T00:17:24.325928 | 2023-05-11T08:54:25 | 2023-05-11T08:54:25 | 493,979,158 | 46 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,336 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Any, Dict, List, TypeVar, Union
import torch
from torch import Tensor
from torch.nn import functional as F
T = TypeVar("T")
class BasicCollate:
"""Collate object for :class:`~torch.utils.data.dataloader.DataLoader`.
Merge lists in dicts into a single dict of lists. No padding is applied.
"""
def __call__(self, batch_lst: List[Dict[str, Any]]) -> Dict[str, List[Any]]:
return list_dict_to_dict_list(batch_lst)
class AdvancedCollate:
"""Advanced collate object for :class:`~torch.utils.data.dataloader.DataLoader`.
Merge lists in dicts into a single dict of lists.
Audio will be padded if a fill value is given in `__init__`.
.. code-block:: python
:caption: Example
>>> collate = AdvancedCollate({"audio": 0.0})
>>> loader = DataLoader(..., collate_fn=collate)
>>> next(iter(loader))
... {"audio": tensor([[...]]), ...}
"""
def __init__(self, fill_values: Dict[str, Union[float, int]]) -> None:
super().__init__()
self.fill_values = fill_values
def __call__(self, batch_lst: List[Dict[str, Any]]) -> Dict[str, Any]:
batch_dic: Dict[str, Any] = list_dict_to_dict_list(batch_lst)
keys = list(batch_dic.keys())
for key in keys:
values = batch_dic[key]
if len(values) == 0:
if key in self.fill_values:
values = torch.as_tensor(values)
batch_dic[key] = values
continue
if key in self.fill_values:
values = list(map(torch.as_tensor, values))
else:
are_tensors = [isinstance(value, Tensor) for value in values]
if not all(are_tensors):
batch_dic[key] = values
continue
are_stackables = [value.shape == values[0].shape for value in values]
if all(are_stackables):
values = torch.stack(values)
batch_dic[key] = values
continue
if key in self.fill_values:
are_paddable = [
value.ndim > 0 and value.shape[:-1] == values[0].shape[:-1]
for value in values
]
if all(are_paddable):
target_length = max(audio_i.shape[-1] for audio_i in values)
values = torch.stack(
[
pad_last_dim(audio_i, target_length, self.fill_values[key])
for audio_i in values
]
)
batch_dic[key] = values
return batch_dic
def pad_last_dim(tensor: Tensor, target_length: int, pad_value: float) -> Tensor:
    """Pad *tensor* at the **end** of its last dim up to ``target_length``.

    (Doc fix: the previous docstring said "left padding", but
    ``F.pad(t, [0, n])`` appends ``n`` elements *after* the existing data.)

    :param tensor: Tensor of at least 1 dim. (..., T)
    :param target_length: Target length of the last dim. If target_length <= T, the function has no effect.
    :param pad_value: Fill value used to pad tensor.
    :returns: A tensor of shape (..., target_length).
    """
    # Never a negative pad: an already-long-enough tensor is returned padded by 0.
    pad_len = max(target_length - tensor.shape[-1], 0)
    return F.pad(tensor, [0, pad_len], value=pad_value)
lst: List[Dict[str, T]],
key_mode: str = "intersect",
) -> Dict[str, List[T]]:
"""Convert list of dicts to dict of lists.
:param lst: The list of dict to merge.
:param key_mode: Can be "same" or "intersect".
If "same", all the dictionaries must contains the same keys otherwise a ValueError will be raised.
If "intersect", only the intersection of all keys will be used in output.
:returns: The dictionary of lists.
"""
if len(lst) == 0:
return {}
keys = set(lst[0].keys())
if key_mode == "same":
if not all(keys == set(item.keys()) for item in lst[1:]):
raise ValueError("Invalid keys for batch.")
elif key_mode == "intersect":
for item in lst[1:]:
keys = keys.intersection(item.keys())
else:
KEY_MODES = ("same", "intersect")
raise ValueError(
f"Invalid argument key_mode={key_mode}. (expected one of {KEY_MODES})"
)
return {key: [item[key] for item in lst] for key in keys}
| [
"etienne.labbe31@gmail.com"
] | etienne.labbe31@gmail.com |
bb5798af7f5b7ac852880514e51bfbacf911e296 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /v34oCTbkrceCZjgRE_10.py | c54979682cf50d42833333a04ece99accfcf5aa3 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | """
The **right shift** operation is similar to **floor division by powers of
two** , thus, the process is _repetitive_ and can be done _recursively_.
Sample calculation using the right shift operator ( `>>` ):
80 >> 3 = floor(80/2^3) = floor(80/8) = 10
-24 >> 2 = floor(-24/2^2) = floor(-24/4) = -6
-5 >> 1 = floor(-5/2^1) = floor(-5/2) = -3
Write a function that **mimics** (without the use of **> >**) the right shift
operator and returns the result from the two given integers.
### Examples
shift_to_right(80, 3) ➞ 10
shift_to_right(-24, 2) ➞ -6
shift_to_right(-5, 1) ➞ -3
shift_to_right(4666, 6) ➞ 72
shift_to_right(3777, 6) ➞ 59
shift_to_right(-512, 10) ➞ -1
### Notes
* There will be no negative values for the second parameter `y`.
* This challenge is more like recreating of the **right shift** operation, thus, **the use of the operator directly** is **prohibited**.
* You are expected to solve this challenge via **recursion**.
* An **iterative** version of this challenge can be found via this [link](https://edabit.com/challenge/noqQNSr5o9qzvXWzL).
"""
def shift_to_right(x, y):
return shift_to_right(x//2,y-1) if y else x
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
6904c6701cd55c3d5a8ea4a4ab5985c5fede348c | ffe606c85de9009d2c15356f82daa524c343b925 | /12.5.redux/data/mkInterRaw.py | 44cdb9a989c9b57e6034b64602279c91e4946483 | [] | no_license | jbinkleyj/story_writer | d88ff7e3360fb8afd12445d1cb237788636b3083 | dc5106a35f5fbce72f8cf0801c0ad4cbc0c9f12f | refs/heads/master | 2020-07-09T15:54:02.492373 | 2017-12-16T07:26:59 | 2017-12-16T07:26:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | import os
import argparse
from nltk.stem import WordNetLemmatizer as WNL
wnl = WNL()
with open("nouncount.raw") as f:
nouns = [x.split("\t")[0] for x in f.readlines()]
print(len(nouns))
nouns = [wnl.lemmatize(n,pos='n') for n in nouns]
print(len(set(nouns)))
exit()
def parseParams():
parser = argparse.ArgumentParser(description='none')
parser.add_argument('-hn', type=int, default=5000)
parser.add_argument('-taken', type=int, default=500)
parser.add_argument('-hv', type=int, default=10000)
parser.add_argument('-takev', type=int, default=250)
parser.add_argument('-nouns', type=str, default="nouncount.raw")
parser.add_argument('-verbs', type=str, default="verbcount.raw")
parser.add_argument('-out', type=str, default="train.raw")
args= parser.parse_args()
return args
def getvocab(args):
nouns = []
with open(args.nouns) as f:
i = 0
while i<args.taken:
l = next(f)
w,k = l.split('\t')
k = int(k)
if k<args.hn:
nouns.append(w)
i+=1
verbs = []
with open(args.verbs) as f:
i = 0
while i<args.takev:
l = next(f)
w,k = l.split('\t')
k = int(k)
if k<args.hv:
verbs.append(w)
i+=1
return set(nouns),set(verbs)
def main():
args = parseParams()
nouns, verbs = getvocab(args)
print(len(nouns))
print(nouns)
with open('train.idxs') as f:
tidx = set([int(x) for x in f.readlines()])
with open('nv.all') as f:
nv = [x for i,x in enumerate(f.readlines()) if i in tidx]
data = []
for l in nv:
tmp = []
for x in l.lower().split(" "):
w,cat = x.split("_")
if cat[0]=="n":
if w in nouns:
tmp.append(w+"_N")
elif cat[0]=="v":
if w in verbs:
tmp.append(w+"_V")
if not tmp:
tmp = ["<NO_ITEMS>"]
data.append(" ".join(tmp))
with open(args.out,'w') as f:
f.write("\n".join(data))
if __name__=="__main__":
main()
| [
"kedzior@uw.edu"
] | kedzior@uw.edu |
b51dc9a0519f650354fb3bd983db8ea83f8bcada | 04803c70bb97012b7d500a177ac0240fb2ddbe38 | /blend3_pdep/pdep/network742_1.py | e379e6bbd30b94463fcbbfb7db73fc42e093f1cc | [] | no_license | shenghuiqin/chpd | 735e0415f6688d88579fc935459c1b0f53596d1d | 396ba54629036e3f2be0b3fabe09b78c90d56939 | refs/heads/master | 2023-03-01T23:29:02.118150 | 2019-10-05T04:02:23 | 2019-10-05T04:02:23 | 192,084,217 | 0 | 0 | null | 2019-06-18T18:33:13 | 2019-06-15T13:52:28 | HTML | UTF-8 | Python | false | false | 111,220 | py | species(
label = '[CH2]CC([CH]C(C)=O)C=O(8130)',
structure = SMILES('[CH2]CC(C=O)C=C(C)[O]'),
E0 = (-61.31,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,350,440,435,1725,2782.5,750,1395,475,1775,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.097499,0.090452,-9.22832e-05,5.51748e-08,-1.39828e-11,-7237.28,34.9678], Tmin=(100,'K'), Tmax=(936.098,'K')), NASAPolynomial(coeffs=[10.6278,0.0454531,-2.01738e-05,3.81787e-09,-2.66455e-13,-9208.67,-15.1392], Tmin=(936.098,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-61.31,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(RCCJ) + radical(C=C(C)OJ)"""),
)
species(
label = 'C2H4(19)(20)',
structure = SMILES('C=C'),
E0 = (42.0619,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2334.71,'J/mol'), sigma=(3.971,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.5, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.9592,-0.00757051,5.7099e-05,-6.91588e-08,2.69884e-11,5089.78,4.0973], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.99183,0.0104834,-3.71721e-06,5.94628e-10,-3.5363e-14,4268.66,-0.269082], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(42.0619,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(133.032,'J/(mol*K)'), label="""C2H4""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'S(247)(246)',
structure = SMILES('CC(=O)C=CC=O'),
E0 = (-249.982,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2782.5,750,1395,475,1775,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,375,552.5,462.5,1710,338.299],'cm^-1')),
HinderedRotor(inertia=(0.102894,'amu*angstrom^2'), symmetry=1, barrier=(8.35729,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.1029,'amu*angstrom^2'), symmetry=1, barrier=(8.35732,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.102901,'amu*angstrom^2'), symmetry=1, barrier=(8.35737,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (98.0999,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3947.01,'J/mol'), sigma=(6.07703,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=616.51 K, Pc=39.91 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.72359,0.0430426,-2.26393e-05,4.45245e-09,-2.82809e-13,-30034.5,19.0288], Tmin=(100,'K'), Tmax=(2443.83,'K')), NASAPolynomial(coeffs=[29.6812,0.00674437,-5.16283e-06,9.95166e-10,-6.31701e-14,-45547.2,-139.895], Tmin=(2443.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-249.982,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)HHH) + group(Cds-O2d(Cds-Cds)Cs) + group(Cd-Cd(CO)H) + group(Cd-Cd(CO)H) + group(Cds-O2d(Cds-Cds)H)"""),
)
species(
label = 'CC(=O)C1CCC1[CH][O](8203)',
structure = SMILES('CC(=O)C1CCC1[CH][O]'),
E0 = (60.0158,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.664167,0.0728434,-4.55369e-05,1.40815e-08,-1.79969e-12,7337.68,31.2523], Tmin=(100,'K'), Tmax=(1716.12,'K')), NASAPolynomial(coeffs=[14.3879,0.0408558,-1.75778e-05,3.22025e-09,-2.17452e-13,2627.34,-42.3686], Tmin=(1716.12,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(60.0158,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(444.824,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsCsH) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsCs) + ring(Cyclobutane) + radical(CCOJ) + radical(CCsJOH)"""),
)
species(
label = 'CC([O])=CC1CCC1[O](8266)',
structure = SMILES('CC([O])=CC1CCC1[O]'),
E0 = (31.2339,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.289133,0.0625278,9.63597e-06,-6.02152e-08,2.82666e-11,3907.3,32.0457], Tmin=(100,'K'), Tmax=(972.465,'K')), NASAPolynomial(coeffs=[19.353,0.0298411,-1.04798e-05,1.92923e-09,-1.40219e-13,-1962.71,-70.5114], Tmin=(972.465,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(31.2339,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + ring(Cyclobutane) + radical(C=C(C)OJ) + radical(CC(C)OJ)"""),
)
species(
label = '[CH2]CC1C=C(C)OC1[O](8267)',
structure = SMILES('[CH2]CC1C=C(C)OC1[O]'),
E0 = (4.87284,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.536939,0.0620012,-3.42384e-06,-3.85323e-08,1.9119e-11,723.378,29.4367], Tmin=(100,'K'), Tmax=(977.352,'K')), NASAPolynomial(coeffs=[15.2521,0.0348263,-1.24405e-05,2.2174e-09,-1.54749e-13,-3731.49,-49.2937], Tmin=(977.352,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(4.87284,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(444.824,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(O2s-CsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + ring(2,3-Dihydrofuran) + radical(RCCJ) + radical(CCOJ)"""),
)
species(
label = 'H(3)(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'C=CC(C=O)C=C(C)[O](8268)',
structure = SMILES('C=CC(C=O)C=C(C)[O]'),
E0 = (-139.909,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,2782.5,750,1395,475,1775,1000,239.176,239.182,239.189],'cm^-1')),
HinderedRotor(inertia=(0.179415,'amu*angstrom^2'), symmetry=1, barrier=(7.28424,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.179413,'amu*angstrom^2'), symmetry=1, barrier=(7.28418,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.179422,'amu*angstrom^2'), symmetry=1, barrier=(7.28419,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.179424,'amu*angstrom^2'), symmetry=1, barrier=(7.28414,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (125.145,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.307795,0.0856099,-8.64933e-05,5.08952e-08,-1.26947e-11,-16697.9,32.2318], Tmin=(100,'K'), Tmax=(949.619,'K')), NASAPolynomial(coeffs=[10.4236,0.0429995,-1.91862e-05,3.64286e-09,-2.54795e-13,-18619.1,-16.0482], Tmin=(949.619,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-139.909,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(415.724,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-OdCsH) + group(Cds-CdsHH) + radical(C=C(C)OJ)"""),
)
species(
label = 'CC([O])=CC=C[O](7453)',
structure = SMILES('CC([O])=CC=C[O]'),
E0 = (-86.1882,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,2750,2800,2850,1350,1500,750,1050,1375,1000,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,180,180,180],'cm^-1')),
HinderedRotor(inertia=(0.912938,'amu*angstrom^2'), symmetry=1, barrier=(20.9902,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.915391,'amu*angstrom^2'), symmetry=1, barrier=(21.0466,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (98.0999,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.756235,0.0562742,-1.51561e-05,-3.6247e-08,2.26938e-11,-10235.2,24.2724], Tmin=(100,'K'), Tmax=(917.521,'K')), NASAPolynomial(coeffs=[21.0957,0.00729401,3.01004e-08,-1.33448e-10,7.27657e-15,-15638.3,-81.2071], Tmin=(917.521,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-86.1882,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(299.321,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + radical(C=C(C)OJ) + radical(C=COJ)"""),
)
species(
label = 'CH3(15)(16)',
structure = SMILES('[CH3]'),
E0 = (136.188,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([604.263,1333.71,1492.19,2836.77,2836.77,3806.92],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (15.0345,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.65718,0.0021266,5.45839e-06,-6.6181e-09,2.46571e-12,16422.7,1.67354], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.97812,0.00579785,-1.97558e-06,3.07298e-10,-1.79174e-14,16509.5,4.72248], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(136.188,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH3""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]CC(C=O)C=C=O(8269)',
structure = SMILES('[CH2]CC(C=O)C=C=O'),
E0 = (-34.4202,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2120,512.5,787.5,3010,987.5,1337.5,450,1655,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,2782.5,750,1395,475,1775,1000,284.339,285.586],'cm^-1')),
HinderedRotor(inertia=(0.13735,'amu*angstrom^2'), symmetry=1, barrier=(7.9913,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.138334,'amu*angstrom^2'), symmetry=1, barrier=(7.98681,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0102449,'amu*angstrom^2'), symmetry=1, barrier=(7.98099,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.028254,'amu*angstrom^2'), symmetry=1, barrier=(21.9659,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (111.119,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.12913,0.0648507,-5.37889e-05,2.42265e-08,-4.61997e-12,-4037.98,29.8972], Tmin=(100,'K'), Tmax=(1207.96,'K')), NASAPolynomial(coeffs=[10.5377,0.0336961,-1.5103e-05,2.87633e-09,-2.01408e-13,-6311.05,-17.2715], Tmin=(1207.96,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-34.4202,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cdd-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-(Cdd-O2d)CsH) + group(Cds-OdCsH) + radical(RCCJ)"""),
)
species(
label = 'C[CH]C(C=O)C=C(C)[O](8270)',
structure = SMILES('C[CH]C(C=O)C=C(C)[O]'),
E0 = (-66.6543,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.195534,0.0853528,-7.68025e-05,3.88413e-08,-8.26993e-12,-7881.07,35.3152], Tmin=(100,'K'), Tmax=(1102.91,'K')), NASAPolynomial(coeffs=[12.083,0.0422396,-1.81668e-05,3.39822e-09,-2.35944e-13,-10503.2,-23.1995], Tmin=(1102.91,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-66.6543,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(C=C(C)OJ) + radical(CCJCC=O)"""),
)
species(
label = 'CC[C](C=O)C=C(C)[O](8271)',
structure = SMILES('CC[C](C=O)C=C(C)[O]'),
E0 = (-146.856,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0820013,0.0804188,-5.9036e-05,2.2438e-08,-3.47351e-12,-17508.1,33.2794], Tmin=(100,'K'), Tmax=(1510.75,'K')), NASAPolynomial(coeffs=[17.1674,0.0347481,-1.36904e-05,2.42793e-09,-1.62247e-13,-22720,-57.056], Tmin=(1510.75,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-146.856,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(C=CCJ(C)C=O) + radical(C=C(C)OJ)"""),
)
species(
label = '[CH2]CC(C=O)C=C([CH2])O(8272)',
structure = SMILES('[CH2]CC(C=O)C=C([CH2])O'),
E0 = (-40.1981,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1277.5,1000,3010,987.5,1337.5,450,1655,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,350,440,435,1725,2782.5,750,1395,475,1775,1000,200,800],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.586387,0.098478,-0.000101041,5.51648e-08,-1.20866e-11,-4667.48,36.597], Tmin=(100,'K'), Tmax=(1105.17,'K')), NASAPolynomial(coeffs=[17.1912,0.0341345,-1.37098e-05,2.48452e-09,-1.69855e-13,-8596.92,-50.9474], Tmin=(1105.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-40.1981,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(RCCJ) + radical(C=C(O)CJ)"""),
)
species(
label = '[CH2]CC([C]=C(C)O)C=O(8273)',
structure = SMILES('[CH2]CC([C]=C(C)O)C=O'),
E0 = (38.727,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1277.5,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,1685,370,3000,3100,440,815,1455,1000,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,350,440,435,1725,2782.5,750,1395,475,1775,1000,200,800],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.332907,0.0994129,-0.000110991,6.93131e-08,-1.77894e-11,4810.33,35.4838], Tmin=(100,'K'), Tmax=(938.253,'K')), NASAPolynomial(coeffs=[13.2661,0.041436,-1.8301e-05,3.45233e-09,-2.40428e-13,2258.49,-29.2568], Tmin=(938.253,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(38.727,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(Cds_S) + radical(RCCJ)"""),
)
species(
label = 'CCC([C]=C(C)[O])C=O(8274)',
structure = SMILES('CCC([C]=C(C)[O])C=O'),
E0 = (-28.7146,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1380,1390,370,380,2900,435,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,2782.5,750,1395,475,1775,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.075314,0.0955463,-0.000111034,7.81964e-08,-2.32549e-11,-3312.17,34.2739], Tmin=(100,'K'), Tmax=(807.274,'K')), NASAPolynomial(coeffs=[9.68069,0.0472031,-2.12021e-05,4.00675e-09,-2.78261e-13,-4887.24,-10.7039], Tmin=(807.274,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-28.7146,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(C=C(C)OJ) + radical(Cds_S)"""),
)
# --- Auto-generated RMG-Py species definitions. ---
# Each block supplies: label/structure (SMILES), ground-state energy E0, statistical-mechanics
# modes where computed, a two-range NASA polynomial thermo fit (source recorded in the
# `comment` string), and a SingleExponentialDown energy-transfer model for P-dep networks.
# NOTE(review): this species carries two radical() thermo corrections (C=C(C)OJ, CC(C)CJ=O)
# yet spinMultiplicity = 1 — confirm a singlet biradical is what the generator intended.
species(
    label = 'CCC([C]=O)C=C(C)[O](8275)',
    structure = SMILES('CCC([C]=O)C=C(C)[O]'),
    E0 = (-107.868,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0856374,0.0888552,-8.576e-05,4.70139e-08,-1.08142e-11,-12834.8,34.3743], Tmin=(100,'K'), Tmax=(1029.48,'K')), NASAPolynomial(coeffs=[11.9605,0.0427156,-1.8532e-05,3.47842e-09,-2.41956e-13,-15279.8,-23.2602], Tmin=(1029.48,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-107.868,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(C=C(C)OJ) + radical(CC(C)CJ=O)"""),
)
# Species 8276: group-additivity thermo with radical corrections C=CCJ(C)C=O + RCCJ;
# spinMultiplicity = 1 again despite two radical sites (see NOTE above).
species(
    label = '[CH2]C[C](C=O)C=C(C)O(8276)',
    structure = SMILES('[CH2]C[C](C=O)C=C(C)O'),
    E0 = (-79.4142,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.387927,0.0849883,-6.21643e-05,1.88783e-08,-8.77441e-13,-9383.58,34.6546], Tmin=(100,'K'), Tmax=(1099.54,'K')), NASAPolynomial(coeffs=[18.4868,0.0325359,-1.27247e-05,2.31185e-09,-1.59642e-13,-14514.3,-62.6525], Tmin=(1099.54,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-79.4142,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(C=CCJ(C)C=O) + radical(RCCJ)"""),
)
# Species 8277: triplet biradical with explicit modes. The six identical HinderedRotor
# entries (inertia 0.156089 amu*angstrom^2, barrier 3.5888 kJ/mol) look like generator
# placeholder values rather than fitted rotors — TODO confirm against the source job.
species(
    label = '[CH2][CH]C(C=O)C=C(C)O(8277)',
    structure = SMILES('[CH2][CH]C(C=O)C=C(C)O'),
    E0 = (0.787281,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1277.5,1000,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,350,440,435,1725,2782.5,750,1395,475,1775,1000,200,800],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.3109,0.092199,-8.7386e-05,4.40767e-08,-9.01166e-12,252.105,37.4143], Tmin=(100,'K'), Tmax=(1172.19,'K')), NASAPolynomial(coeffs=[16.3522,0.0353378,-1.46237e-05,2.69439e-09,-1.85863e-13,-3654.39,-45.6232], Tmin=(1172.19,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(0.787281,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(CCJCC=O) + radical(RCCJ)"""),
)
# Species 8278: acyl (CC(C)CJ=O) + primary (RCCJ) radical sites; triplet; same
# placeholder-looking rotor parameters as 8277 above.
species(
    label = '[CH2]CC([C]=O)C=C(C)O(8278)',
    structure = SMILES('[CH2]CC([C]=O)C=C(C)O'),
    E0 = (-40.4261,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3615,1277.5,1000,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,1855,455,950,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,350,440,435,1725,200,800],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.378565,0.0952384,-9.48817e-05,5.05576e-08,-1.0913e-11,-4703.53,36.3195], Tmin=(100,'K'), Tmax=(1113.98,'K')), NASAPolynomial(coeffs=[16.1292,0.0359632,-1.50659e-05,2.79118e-09,-1.93149e-13,-8381.37,-45.1026], Tmin=(1113.98,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-40.4261,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(CC(C)CJ=O) + radical(RCCJ)"""),
)
# Species 8279: group-additivity thermo; two radical corrections (C=C(O)CJ, C=C(C)OJ)
# with spinMultiplicity = 1 — NOTE(review): confirm singlet biradical is intended.
species(
    label = '[CH2]C([O])=CC(C=O)CC(8279)',
    structure = SMILES('[CH2]C([O])=CC(C=O)CC'),
    E0 = (-107.64,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116438,0.0920411,-9.17875e-05,5.15116e-08,-1.19601e-11,-12799,34.63], Tmin=(100,'K'), Tmax=(1029.42,'K')), NASAPolynomial(coeffs=[13.0707,0.0408007,-1.71244e-05,3.15924e-09,-2.17601e-13,-15514.1,-29.3727], Tmin=(1029.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-107.64,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(C=C(O)CJ) + radical(C=C(C)OJ)"""),
)
# C2H4(T): triplet ethylene ([CH2][CH2], spinMultiplicity = 3); thermo taken from the
# DFT_QCI_thermo library rather than group additivity.
species(
    label = 'C2H4(T)(890)',
    structure = SMILES('[CH2][CH2]'),
    E0 = (318.146,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,180,1436.54,1437.15,2688.96,2689.16],'cm^-1')),
        HinderedRotor(inertia=(0.0257549,'amu*angstrom^2'), symmetry=1, barrier=(17.2441,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (28.0532,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.40736,0.0100312,6.40927e-06,-1.41291e-08,5.92671e-12,38288.2,6.11703], Tmin=(100,'K'), Tmax=(954.26,'K')), NASAPolynomial(coeffs=[5.52249,0.00856173,-2.90743e-06,5.02353e-10,-3.44572e-14,37547.8,-5.75276], Tmin=(954.26,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(318.146,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""C2H4(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
# Species 3732: vinyl/enol-oxy triplet biradical; thermo = DFT_QCI_thermo library value
# plus radical corrections (C=C(C)OJ, Cds_P).
species(
    label = '[CH]=C(C)[O](3732)',
    structure = SMILES('[CH]=C(C)[O]'),
    E0 = (198.59,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([350,440,435,1725,2750,2800,2850,1350,1500,750,1050,1375,1000,3120,650,792.5,1650],'cm^-1')),
        HinderedRotor(inertia=(0.320495,'amu*angstrom^2'), symmetry=1, barrier=(7.36881,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (56.0633,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.64262,0.0274444,-2.15887e-05,9.3379e-09,-1.65054e-12,23935.7,14.2603], Tmin=(100,'K'), Tmax=(1341.7,'K')), NASAPolynomial(coeffs=[7.83004,0.0119796,-4.29975e-06,7.47552e-10,-4.99368e-14,22543.6,-12.2909], Tmin=(1341.7,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(198.59,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(224.491,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(C=C(C)OJ) + radical(Cds_P)"""),
)
# Species 6110: NOTE(review): label '[CH2]C[CH]C=O' and structure SMILES('[CH2]CC=C[O]')
# differ — presumably the structure is the chosen resonance representative; confirm.
species(
    label = '[CH2]C[CH]C=O(6110)',
    structure = SMILES('[CH2]CC=C[O]'),
    E0 = (121.395,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2995,3025,975,1000,1300,1375,400,500,1630,1680,3000,3100,440,815,1455,1000,439.352,440.116],'cm^-1')),
        HinderedRotor(inertia=(0.132298,'amu*angstrom^2'), symmetry=1, barrier=(18.3159,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.132458,'amu*angstrom^2'), symmetry=1, barrier=(18.2855,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (70.0898,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.06392,0.0297838,1.93326e-05,-5.14894e-08,2.33454e-11,14681.8,20.083], Tmin=(100,'K'), Tmax=(942.939,'K')), NASAPolynomial(coeffs=[13.9169,0.0116798,-3.05432e-06,5.27558e-10,-4.05895e-14,11016,-43.9895], Tmin=(942.939,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(121.395,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(249.434,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + radical(C=COJ) + radical(RCCJ)"""),
)
# Species 8280: epoxide-ring isomer (Ethylene_oxide ring correction in thermo comment);
# two radical() corrections with spinMultiplicity = 1 — NOTE(review): confirm singlet intent.
species(
    label = '[CH2]CC(C=O)C1O[C]1C(8280)',
    structure = SMILES('[CH2]CC(C=O)C1O[C]1C'),
    E0 = (85.5967,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.218847,0.0911506,-9.27299e-05,5.57475e-08,-1.37493e-11,10448.2,34.6342], Tmin=(100,'K'), Tmax=(982.239,'K')), NASAPolynomial(coeffs=[12.4429,0.0395883,-1.39882e-05,2.30413e-09,-1.46993e-13,7960.79,-26.2244], Tmin=(982.239,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(85.5967,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + ring(Ethylene_oxide) + radical(C2CsJO) + radical(RCCJ)"""),
)
# Species 8281: cyclopentane-ring isomer; no explicit modes (thermo-only entry).
species(
    label = 'CC1([O])[CH]C(C=O)CC1(8281)',
    structure = SMILES('CC1([O])[CH]C(C=O)CC1'),
    E0 = (10.5697,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.613634,0.0564404,1.8504e-05,-6.32644e-08,2.78832e-11,1409.24,32.1362], Tmin=(100,'K'), Tmax=(981.959,'K')), NASAPolynomial(coeffs=[16.7904,0.0335743,-1.22976e-05,2.27292e-09,-1.63527e-13,-3842.32,-56.1764], Tmin=(981.959,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(10.5697,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsCsOs) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + ring(Cyclopentane) + radical(CCJCO) + radical(CC(C)2OJ)"""),
)
# Species 8152: tetrahydrofuran-ring isomer.
species(
    label = 'CC([O])=CC1[CH]OCC1(8152)',
    structure = SMILES('CC([O])=CC1[CH]OCC1'),
    E0 = (-58.6355,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.297433,0.0643566,8.90539e-06,-6.97647e-08,3.66515e-11,-6902.77,28.3385], Tmin=(100,'K'), Tmax=(888.882,'K')), NASAPolynomial(coeffs=[19.6755,0.0247127,-4.44978e-06,4.43418e-10,-2.36917e-14,-12226.5,-73.4354], Tmin=(888.882,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-58.6355,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + ring(Tetrahydrofuran) + radical(C=C(C)OJ) + radical(CCsJOCs)"""),
)
# Species 8282: cyclic peroxide (34dihydro12dioxin ring correction).
species(
    label = '[CH2]CC1[CH]OOC(C)=C1(8282)',
    structure = SMILES('[CH2]CC1[CH]OOC(C)=C1'),
    E0 = (256.582,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.307346,0.0706121,-3.12814e-05,-5.29492e-09,6.14415e-12,31001.8,29.9323], Tmin=(100,'K'), Tmax=(1043.12,'K')), NASAPolynomial(coeffs=[13.7539,0.0391196,-1.48568e-05,2.65352e-09,-1.81599e-13,27104.6,-40.7414], Tmin=(1043.12,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(256.582,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(444.824,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-O2s(Cds-Cd)) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + ring(34dihydro12dioxin) + radical(CCsJOOC) + radical(RCCJ)"""),
)
# Species 8283: closed-shell isomer (no radical() corrections in the thermo comment).
species(
    label = 'C=CC(C=O)C=C(C)O(8283)',
    structure = SMILES('C=CC(C=O)C=C(C)O'),
    E0 = (-277.714,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.19234,0.0913294,-8.57538e-05,4.33272e-08,-8.95944e-12,-33249.5,32.5133], Tmin=(100,'K'), Tmax=(1152.28,'K')), NASAPolynomial(coeffs=[15.2358,0.0377716,-1.60328e-05,2.98845e-09,-2.07356e-13,-36805,-44.1052], Tmin=(1152.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-277.714,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-OdCsH) + group(Cds-CdsHH)"""),
)
# CH2(S): singlet methylene (spinMultiplicity = 1); E0 lies above the triplet CH2 defined
# later in this file, consistent with the (S) excited-state label. Library thermo + GRI-Mech transport.
species(
    label = 'CH2(S)(21)(22)',
    structure = SMILES('[CH2]'),
    E0 = (419.862,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (14.0266,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
# Species 8284: C6 triplet biradical with explicit modes (four hindered rotors).
species(
    label = '[CH2]CC(C=O)C=C[O](8284)',
    structure = SMILES('[CH2]CC(C=O)C=C[O]'),
    E0 = (-15.8606,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2782.5,750,1395,475,1775,1000,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,246.378,246.409,246.829],'cm^-1')),
        HinderedRotor(inertia=(0.00278503,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.00278676,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.444213,'amu*angstrom^2'), symmetry=1, barrier=(19.0158,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.444414,'amu*angstrom^2'), symmetry=1, barrier=(19.0149,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (112.127,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.243822,0.0750511,-6.468e-05,2.87614e-08,-5.12457e-12,-1766.12,32.3358], Tmin=(100,'K'), Tmax=(1345.02,'K')), NASAPolynomial(coeffs=[16.5988,0.0264127,-1.04373e-05,1.87584e-09,-1.2734e-13,-6165.68,-51.4153], Tmin=(1345.02,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-15.8606,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(365.837,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-OdCsH) + group(Cds-CdsOsH) + radical(C=COJ) + radical(RCCJ)"""),
)
# CO: carbon monoxide written in its charge-separated SMILES form; library thermo (BurkeH2O2)
# and PrimaryTransportLibrary collision data.
species(
    label = 'CO(10)(11)',
    structure = SMILES('[C-]#[O+]'),
    E0 = (-119.219,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2084.51],'cm^-1')),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (28.0101,'amu'),
    collisionModel = TransportData(shapeIndex=1, epsilon=(762.44,'J/mol'), sigma=(3.69,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.5971,-0.00102424,2.83336e-06,-1.75825e-09,3.42587e-13,-14343.2,3.45822], Tmin=(100,'K'), Tmax=(1669.93,'K')), NASAPolynomial(coeffs=[2.92796,0.00181931,-8.35308e-07,1.51269e-10,-9.88872e-15,-14292.7,6.51157], Tmin=(1669.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-119.219,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""CO""", comment="""Thermo library: BurkeH2O2"""),
)
# Species 8285: CO-loss co-product candidate (C6H10O triplet biradical) with fitted rotors.
species(
    label = '[CH2]CCC=C(C)[O](8285)',
    structure = SMILES('[CH2]CCC=C(C)[O]'),
    E0 = (52.165,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,350,440,435,1725,3010,987.5,1337.5,450,1655,180,180,4000],'cm^-1')),
        HinderedRotor(inertia=(0.605003,'amu*angstrom^2'), symmetry=1, barrier=(13.9102,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.253526,'amu*angstrom^2'), symmetry=1, barrier=(5.82907,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.0336607,'amu*angstrom^2'), symmetry=1, barrier=(13.9133,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.024078,'amu*angstrom^2'), symmetry=1, barrier=(13.9131,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (98.143,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.56587,0.0683281,-5.13449e-05,2.04882e-08,-3.34809e-12,6403.5,29.8053], Tmin=(100,'K'), Tmax=(1435.99,'K')), NASAPolynomial(coeffs=[14.225,0.0302799,-1.16003e-05,2.03632e-09,-1.3567e-13,2480.67,-41.0343], Tmin=(1435.99,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(52.165,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + radical(RCCJ) + radical(C=C(C)OJ)"""),
)
# Species 8286: closed-shell dihydropyran ring-closure product.
species(
    label = 'CC1=CC(C=O)CCO1(8286)',
    structure = SMILES('CC1=CC(C=O)CCO1'),
    E0 = (-309.653,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.602552,0.0613302,-3.90956e-06,-3.53158e-08,1.72502e-11,-37108.5,25.1686], Tmin=(100,'K'), Tmax=(989.838,'K')), NASAPolynomial(coeffs=[14.1525,0.0371573,-1.36241e-05,2.44162e-09,-1.69821e-13,-41289.2,-47.6319], Tmin=(989.838,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-309.653,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + ring(3,4-Dihydro-2H-pyran)"""),
)
# Species 8132: NOTE(review): label '[CH2]CO[CH]C=CC(C)=O' and structure
# SMILES('[CH2]COC=CC=C(C)[O]') differ — presumably the structure is the chosen
# resonance representative; confirm. Rotor entries repeat the generic
# (0.156089 amu*angstrom^2, 3.5888 kJ/mol) values, which look like generator defaults.
species(
    label = '[CH2]CO[CH]C=CC(C)=O(8132)',
    structure = SMILES('[CH2]COC=CC=C(C)[O]'),
    E0 = (-34.9253,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,350,440,435,1725,200,800,1066.67,1333.33,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(4230.1,'J/mol'), sigma=(7.01879,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=660.73 K, Pc=27.76 bar (from Joback method)"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-2.3661,0.111352,-0.000112208,5.44004e-08,-9.86559e-12,-3946.78,38.1853], Tmin=(100,'K'), Tmax=(1548.44,'K')), NASAPolynomial(coeffs=[29.2647,0.0134263,-1.63648e-06,3.17452e-11,4.33387e-15,-11798.4,-121.968], Tmin=(1548.44,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-34.9253,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(O2s-(Cds-Cd)H) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + radical(CJCO) + radical(C=C(C)OJ)"""),
)
# Species 8287: vinyl-ether triplet biradical; same generic rotor placeholders as above.
species(
    label = '[CH2]CC=COC=C(C)[O](8287)',
    structure = SMILES('[CH2]CC=COC=C(C)[O]'),
    E0 = (3.60971,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,350,440,435,1725,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,200,800,1066.67,1333.33,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.612217,0.0892244,-6.43498e-05,1.16185e-08,4.38356e-12,611.114,37.4621], Tmin=(100,'K'), Tmax=(976.907,'K')), NASAPolynomial(coeffs=[20.9861,0.0270898,-9.32845e-06,1.62946e-09,-1.12739e-13,-4863.8,-72.656], Tmin=(976.907,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(3.60971,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + group(Cds-CdsOsH) + radical(RCCJ) + radical(C=C(C)OJ)"""),
)
# Species 8288: dienol triplet biradical; same generic rotor placeholders as above.
species(
    label = '[CH2]CC(C=C(C)[O])=CO(8288)',
    structure = SMILES('[CH2]CC(C=C(C)[O])=CO'),
    E0 = (-82.6405,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3615,1277.5,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2800,2850,1350,1500,750,1050,1375,1000,325,375,415,465,420,450,1700,1750,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1600],'cm^-1')),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-2.35662,0.110297,-0.000111497,5.43138e-08,-9.86339e-12,-9685.18,39.351], Tmin=(100,'K'), Tmax=(1560.69,'K')), NASAPolynomial(coeffs=[28.6787,0.0130927,-1.09673e-06,-9.60461e-11,1.38226e-14,-17221.4,-117.3], Tmin=(1560.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-82.6405,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsOs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + radical(RCCJ) + radical(C=C(C)OJ)"""),
)
# CH2: ground-state (triplet) methylene; library thermo (Klippenstein_Glarborg2016),
# GRI-Mech transport. Compare with the CH2(S) singlet defined earlier in this file.
species(
    label = 'CH2(17)(18)',
    structure = SMILES('[CH2]'),
    E0 = (381.563,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (14.0266,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
# Species 8289: C6 triplet biradical (CH2-loss co-product candidate) with fitted rotors.
species(
    label = '[CH2]C(C=O)C=C(C)[O](8289)',
    structure = SMILES('[CH2]C(C=O)C=C(C)[O]'),
    E0 = (-32.2653,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,350,440,435,1725,2782.5,750,1395,475,1775,1000,180,180],'cm^-1')),
        HinderedRotor(inertia=(0.269022,'amu*angstrom^2'), symmetry=1, barrier=(6.18534,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.268774,'amu*angstrom^2'), symmetry=1, barrier=(6.17964,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.269081,'amu*angstrom^2'), symmetry=1, barrier=(6.18671,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.268781,'amu*angstrom^2'), symmetry=1, barrier=(6.1798,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (112.127,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29435,0.086515,-0.000109534,8.05851e-08,-2.44109e-11,-3751.8,30.5746], Tmin=(100,'K'), Tmax=(800.711,'K')), NASAPolynomial(coeffs=[10.238,0.0368371,-1.64636e-05,3.08936e-09,-2.13099e-13,-5344.07,-15.187], Tmin=(800.711,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-32.2653,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(365.837,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(C=C(C)OJ) + radical(CJC(C)C=O)"""),
)
# O atom: library thermo (BurkeH2O2), GRI-Mech transport.
# NOTE(review): spinMultiplicity = 1 here, but ground-state atomic oxygen is a triplet —
# confirm this is an intentional generator convention and not an artifact.
species(
    label = 'O(4)(4)',
    structure = SMILES('[O]'),
    E0 = (243.005,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (15.9994,'amu'),
    collisionModel = TransportData(shapeIndex=0, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,29226.7,5.11107], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,29226.7,5.11107], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(243.005,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""O""", comment="""Thermo library: BurkeH2O2"""),
)
# Species 8290: O-loss co-product candidate (C7H10 backbone, triplet) with fitted rotors.
species(
    label = '[CH2]CC(C=O)C=[C]C(8290)',
    structure = SMILES('[CH2]CC(C=O)C=[C]C'),
    E0 = (253.286,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1685,370,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,2782.5,750,1395,475,1775,1000,204.57,1900.91],'cm^-1')),
        HinderedRotor(inertia=(0.00403665,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.308863,'amu*angstrom^2'), symmetry=1, barrier=(9.19579,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.308906,'amu*angstrom^2'), symmetry=1, barrier=(9.19561,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.309562,'amu*angstrom^2'), symmetry=1, barrier=(9.19843,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.309848,'amu*angstrom^2'), symmetry=1, barrier=(9.19523,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (110.154,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.32225,0.0698132,-2.89024e-05,-6.45708e-08,7.53641e-11,30548.5,29.6528], Tmin=(100,'K'), Tmax=(481.7,'K')), NASAPolynomial(coeffs=[5.82802,0.0502101,-2.33266e-05,4.47891e-09,-3.13852e-13,29907.8,9.06111], Tmin=(481.7,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(253.286,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-OdCsH) + radical(RCCJ) + radical(Cds_S)"""),
)
# Species 8291: cyclopropane-ring isomer; two radical() corrections with
# spinMultiplicity = 1 — NOTE(review): confirm singlet biradical is intended.
species(
    label = '[CH2]CC1C([O])C1C(C)=O(8291)',
    structure = SMILES('[CH2]CC1C([O])C1C(C)=O'),
    E0 = (81.3964,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (126.153,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0171711,0.0762138,-4.21152e-05,1.84722e-09,4.18985e-12,9944.67,33.8418], Tmin=(100,'K'), Tmax=(1079.79,'K')), NASAPolynomial(coeffs=[16.9004,0.0347117,-1.38676e-05,2.56201e-09,-1.78984e-13,5057.14,-54.7888], Tmin=(1079.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(81.3964,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsCsCsH) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsCs) + ring(Cyclopropane) + radical(RCCJ) + radical(CC(C)OJ)"""),
)
# Species 8292: doublet radical (spinMultiplicity = 2, single RCCJ correction);
# H-loss product candidate given its 125.145 amu weight vs the 126.153 amu isomers.
species(
    label = '[CH2]CC(C=O)=CC(C)=O(8292)',
    structure = SMILES('[CH2]CC(C=O)=CC(C)=O'),
    E0 = (-110.724,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2782.5,750,1395,475,1775,1000,3010,987.5,1337.5,450,1655,350,440,435,1725,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,375,552.5,462.5,1710,180,4000],'cm^-1')),
        HinderedRotor(inertia=(0.222861,'amu*angstrom^2'), symmetry=1, barrier=(5.12401,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.00600122,'amu*angstrom^2'), symmetry=1, barrier=(5.11601,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.919447,'amu*angstrom^2'), symmetry=1, barrier=(21.1399,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.919466,'amu*angstrom^2'), symmetry=1, barrier=(21.1403,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.222626,'amu*angstrom^2'), symmetry=1, barrier=(5.11862,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (125.145,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.816619,0.0784248,-7.60857e-05,5.15682e-08,-1.71227e-11,-13210,31.999], Tmin=(100,'K'), Tmax=(683.648,'K')), NASAPolynomial(coeffs=[4.46248,0.057092,-2.92768e-05,5.91964e-09,-4.28856e-13,-13708.5,15.7966], Tmin=(683.648,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-110.724,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cd-CdCs(CO)) + group(Cds-O2d(Cds-Cds)Cs) + group(Cd-Cd(CO)H) + group(Cds-O2d(Cds-Cds)H) + radical(RCCJ)"""),
)
# HCO: formyl radical; library thermo (Klippenstein_Glarborg2016), GRI-Mech transport.
species(
    label = 'HCO(14)(15)',
    structure = SMILES('[CH]=O'),
    E0 = (32.4782,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1131.19,1955.83,1955.83],'cm^-1')),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (29.018,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(4140.62,'J/mol'), sigma=(3.59,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.23755,-0.00332075,1.4003e-05,-1.3424e-08,4.37416e-12,3872.41,3.30835], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.92002,0.00252279,-6.71004e-07,1.05616e-10,-7.43798e-15,3653.43,3.58077], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(32.4782,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""HCO""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]CC=CC(C)=O(8293)',
structure = SMILES('[CH2]CC=CC(C)=O'),
E0 = (20.0816,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,375,552.5,462.5,1710,2995,3025,975,1000,1300,1375,400,500,1630,1680,180,4000],'cm^-1')),
HinderedRotor(inertia=(0.0103218,'amu*angstrom^2'), symmetry=1, barrier=(24.5389,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.24096,'amu*angstrom^2'), symmetry=1, barrier=(5.54015,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.241092,'amu*angstrom^2'), symmetry=1, barrier=(5.54317,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.06722,'amu*angstrom^2'), symmetry=1, barrier=(24.5375,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (97.1351,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.59493,0.0537584,-2.91091e-05,7.02036e-09,-6.53913e-13,2500.45,26.5584], Tmin=(100,'K'), Tmax=(2453.42,'K')), NASAPolynomial(coeffs=[21.6564,0.0210502,-9.11138e-06,1.58632e-09,-1.00185e-13,-7343.3,-88.2314], Tmin=(2453.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(20.0816,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(365.837,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-CdsCsH) + group(Cds-O2d(Cds-Cds)Cs) + group(Cd-Cd(CO)H) + radical(RCCJ)"""),
)
species(
label = '[CH2]C[C](C=O)CC(C)=O(8294)',
structure = SMILES('[CH2]CC(=C[O])CC(C)=O'),
E0 = (-71.7937,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.303138,0.0794292,-4.02618e-05,-4.8566e-09,6.93773e-12,-8466.96,35.4357], Tmin=(100,'K'), Tmax=(1092.46,'K')), NASAPolynomial(coeffs=[20.2797,0.0322103,-1.40718e-05,2.74305e-09,-1.97889e-13,-14643.6,-73.3717], Tmin=(1092.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-71.7937,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-O2d)(Cds-Cds)HH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-CdsCsCs) + group(Cds-OdCsCs) + group(Cds-CdsOsH) + radical(RCCJ) + radical(C=COJ)"""),
)
species(
label = '[CH2][CH]C(C=O)CC(C)=O(8295)',
structure = SMILES('[CH2][CH]C(C=O)CC(C)=O'),
E0 = (-12.9462,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,375,552.5,462.5,1710,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,2782.5,750,1395,475,1775,1000,252.962,899.303,1811.85],'cm^-1')),
HinderedRotor(inertia=(0.0936563,'amu*angstrom^2'), symmetry=1, barrier=(3.44479,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0936563,'amu*angstrom^2'), symmetry=1, barrier=(3.44479,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0936563,'amu*angstrom^2'), symmetry=1, barrier=(3.44479,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0936563,'amu*angstrom^2'), symmetry=1, barrier=(3.44479,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0936563,'amu*angstrom^2'), symmetry=1, barrier=(3.44479,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0936563,'amu*angstrom^2'), symmetry=1, barrier=(3.44479,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.386995,0.0860947,-8.90482e-05,5.92078e-08,-1.751e-11,-1432.88,36.8844], Tmin=(100,'K'), Tmax=(793.821,'K')), NASAPolynomial(coeffs=[7.20923,0.0517171,-2.40869e-05,4.65069e-09,-3.27784e-13,-2515.98,5.54649], Tmin=(793.821,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-12.9462,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsCs) + group(Cds-OdCsH) + radical(RCCJ) + radical(CCJCC=O)"""),
)
species(
label = '[CH2]CC([C]=O)CC(C)=O(8296)',
structure = SMILES('[CH2]CC([C]=O)CC(C)=O'),
E0 = (-54.1596,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1855,455,950,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,375,552.5,462.5,1710,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.17115,0.0910019,-0.000103656,7.58021e-08,-2.41684e-11,-6382.26,36.3132], Tmin=(100,'K'), Tmax=(746.483,'K')), NASAPolynomial(coeffs=[7.57281,0.0513371,-2.39457e-05,4.60903e-09,-3.23545e-13,-7487.21,2.7692], Tmin=(746.483,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-54.1596,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsCs) + group(Cds-OdCsH) + radical(CC(C)CJ=O) + radical(RCCJ)"""),
)
species(
label = '[CH2]CC(C=O)CC([CH2])=O(8297)',
structure = SMILES('[CH2]CC(C=O)CC(=C)[O]'),
E0 = (-47.9299,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2782.5,750,1395,475,1775,1000,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0251064,0.0883439,-8.04497e-05,4.0145e-08,-8.29317e-12,-5619.55,36.5009], Tmin=(100,'K'), Tmax=(1145.86,'K')), NASAPolynomial(coeffs=[13.8988,0.0397377,-1.68209e-05,3.12527e-09,-2.16281e-13,-8810.5,-32.5696], Tmin=(1145.86,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-47.9299,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsOs) + group(Cds-OdCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(C=C(C)OJ)"""),
)
species(
label = '[CH2]CC1[CH]OC1C(C)=O(8298)',
structure = SMILES('[CH2]CC1[CH]OC1C(C)=O'),
E0 = (61.2764,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.239002,0.0656907,3.87255e-06,-6.17222e-08,3.23854e-11,7521.16,33.7002], Tmin=(100,'K'), Tmax=(907.567,'K')), NASAPolynomial(coeffs=[19.4857,0.0263212,-6.19016e-06,8.58585e-10,-5.56352e-14,2155.5,-67.6011], Tmin=(907.567,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(61.2764,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsCsCsH) + group(Cs-(Cds-O2d)CsOsH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsCs) + ring(Oxetane) + radical(CCsJOCs) + radical(RCCJ)"""),
)
species(
label = 'C[C]1[CH]C(C=O)CCO1(8299)',
structure = SMILES('C[C]1[CH]C(C=O)CCO1'),
E0 = (-12.32,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.676536,0.0645684,-1.8017e-05,-1.8637e-08,1.17867e-11,-1354.36,29.2221], Tmin=(100,'K'), Tmax=(937.556,'K')), NASAPolynomial(coeffs=[11.056,0.0411983,-1.40858e-05,2.35908e-09,-1.55896e-13,-4219.76,-25.0852], Tmin=(937.556,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-12.32,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(448.981,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsOsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + ring(Oxane) + radical(CCJCO) + radical(C2CsJOCs)"""),
)
species(
label = 'CCC(C=O)=CC(C)=O(8300)',
structure = SMILES('CCC(C=O)=CC(C)=O'),
E0 = (-315.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.68247,0.068613,-3.9292e-05,9.7319e-09,-9.41437e-13,-37938.3,26.9747], Tmin=(100,'K'), Tmax=(2183.43,'K')), NASAPolynomial(coeffs=[17.4268,0.0397697,-1.94768e-05,3.68172e-09,-2.48697e-13,-44813.6,-61.277], Tmin=(2183.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-315.971,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cd-CdCs(CO)) + group(Cds-O2d(Cds-Cds)Cs) + group(Cd-Cd(CO)H) + group(Cds-O2d(Cds-Cds)H)"""),
)
species(
label = 'C=CC(C=O)CC(C)=O(8301)',
structure = SMILES('C=CC(C=O)CC(C)=O'),
E0 = (-291.447,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.760893,0.0808376,-6.31836e-05,7.24134e-09,1.85735e-11,-34945.5,31.1411], Tmin=(100,'K'), Tmax=(546.942,'K')), NASAPolynomial(coeffs=[6.21804,0.0540009,-2.54379e-05,4.93624e-09,-3.4891e-13,-35738,6.31876], Tmin=(546.942,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-291.447,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsCs) + group(Cds-CdsCsH) + group(Cds-OdCsH) + group(Cds-CdsHH)"""),
)
species(
label = '[CH2]CC([CH]C=O)C(C)=O(8131)',
structure = SMILES('[CH2]CC(C=C[O])C(C)=O'),
E0 = (-70.7314,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,375,552.5,462.5,1710,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(4421.26,'J/mol'), sigma=(7.20031,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=690.59 K, Pc=26.87 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.301697,0.0890858,-7.69914e-05,3.4881e-08,-6.40823e-12,-8347.66,35.8825], Tmin=(100,'K'), Tmax=(1294.71,'K')), NASAPolynomial(coeffs=[17.2,0.0350154,-1.43487e-05,2.62588e-09,-1.80086e-13,-12879.7,-53.0738], Tmin=(1294.71,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-70.7314,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(436.51,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsCs) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + radical(RCCJ) + radical(C=COJ)"""),
)
species(
label = '[CH2]C[CH]C(C=O)C(C)=O(8239)',
structure = SMILES('[CH2]C[CH]C(C=O)C(C)=O'),
E0 = (-12.6465,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2800,2850,1350,1500,750,1050,1375,1000,375,552.5,462.5,1710,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,2782.5,750,1395,475,1775,1000,194.364,1057.99,1891.37],'cm^-1')),
HinderedRotor(inertia=(0.0777054,'amu*angstrom^2'), symmetry=1, barrier=(2.8985,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0777054,'amu*angstrom^2'), symmetry=1, barrier=(2.8985,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0777054,'amu*angstrom^2'), symmetry=1, barrier=(2.8985,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0777054,'amu*angstrom^2'), symmetry=1, barrier=(2.8985,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0777054,'amu*angstrom^2'), symmetry=1, barrier=(2.8985,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0777054,'amu*angstrom^2'), symmetry=1, barrier=(2.8985,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.35373,0.0746938,-5.00849e-05,1.68384e-08,-2.3167e-12,-1385.72,38.6044], Tmin=(100,'K'), Tmax=(1651.38,'K')), NASAPolynomial(coeffs=[16.1964,0.0363193,-1.5228e-05,2.76657e-09,-1.86374e-13,-6618.15,-45.7741], Tmin=(1651.38,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-12.6465,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsCs) + group(Cds-OdCsH) + radical(CCJCC=O) + radical(RCCJ)"""),
)
species(
label = '[CH2]CC(C=O)C(C)[C]=O(8302)',
structure = SMILES('[CH2]CC(C=O)C(C)[C]=O'),
E0 = (-28.8296,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1855,455,950,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,2782.5,750,1395,475,1775,1000,200,800],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.316208,0.0868866,-8.51035e-05,5.028e-08,-1.29404e-11,-3339.75,36.3123], Tmin=(100,'K'), Tmax=(909.679,'K')), NASAPolynomial(coeffs=[8.97149,0.0488288,-2.23499e-05,4.29138e-09,-3.01958e-13,-4914.49,-4.62524], Tmin=(909.679,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-28.8296,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(432.353,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + group(Cds-OdCsH) + radical(RCCJ) + radical(CC(C)CJ=O)"""),
)
species(
label = 'CC(=O)C1CCC1C=O(8133)',
structure = SMILES('CC(=O)C1CCC1C=O'),
E0 = (-265.678,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (126.153,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.501106,0.0652541,-2.12591e-05,-1.11198e-08,6.64726e-12,-31818,30.9163], Tmin=(100,'K'), Tmax=(1134.09,'K')), NASAPolynomial(coeffs=[13.777,0.0401509,-1.67868e-05,3.14024e-09,-2.19296e-13,-36226.1,-40.9612], Tmin=(1134.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-265.678,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(444.824,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsCs) + group(Cds-OdCsH) + ring(Cyclobutane)"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ar(8)',
structure = SMILES('[Ar]'),
E0 = (-6.19426,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (39.348,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,-745,4.3663], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,-745,4.3663], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-6.19426,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: BurkeH2O2"""),
)
transitionState(
label = 'TS1',
E0 = (-61.31,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (60.0158,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (31.2339,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (18.0336,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (81.9249,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (-24.9952,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (155.95,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (90.569,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (60.0262,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (162.377,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (189.072,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (15.594,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (14.7146,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (-23.3611,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (95.3999,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (54.1865,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (-5.66277,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (231.958,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (322.946,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (169.906,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (10.5697,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (-23.4417,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (256.582,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (-36.3367,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (404.001,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (276.871,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (-53.1093,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (278.875,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (317.41,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (61.2262,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS31',
E0 = (349.298,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS32',
E0 = (496.29,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS33',
E0 = (81.3964,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS34',
E0 = (111.888,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS35',
E0 = (77.3972,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS36',
E0 = (57.2995,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS37',
E0 = (70.3853,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS38',
E0 = (111.597,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS39',
E0 = (70.384,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS40',
E0 = (93.4893,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS41',
E0 = (65.4492,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS42',
E0 = (26.7723,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS43',
E0 = (16.9371,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS44',
E0 = (27.6586,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS45',
E0 = (108.045,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS46',
E0 = (110.104,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS47',
E0 = (165.936,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS48',
E0 = (-53.4022,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['C2H4(19)(20)', 'S(247)(246)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['CC(=O)C1CCC1[CH][O](8203)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(1.20551e+07,'s^-1'), n=1.225, Ea=(121.326,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R5_SS_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Exocyclic
Ea raised from 118.9 to 121.3 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['CC([O])=CC1CCC1[O](8266)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(1.46159e+06,'s^-1'), n=1.55572, Ea=(92.5439,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5_SS;multiplebond_intra;radadd_intra_cs2H] for rate rule [R5_SS_CO;carbonylbond_intra_H;radadd_intra_cs2H]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Exocyclic
Ea raised from 88.6 to 92.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction4',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['[CH2]CC1C=C(C)OC1[O](8267)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(3.49749e+08,'s^-1'), n=0.656505, Ea=(79.3435,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6_SMS;multiplebond_intra;radadd_intra] for rate rule [R6_SMS_CO;carbonylbond_intra_H;radadd_intra_O]
Euclidian distance = 2.44948974278
family: Intra_R_Add_Exocyclic"""),
)
reaction(
label = 'reaction5',
reactants = ['H(3)(3)', 'C=CC(C=O)C=C(C)[O](8268)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(3.01e+08,'cm^3/(mol*s)'), n=1.6, Ea=(10.0416,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 10 used for Cds-CsH_Cds-HH;HJ
Exact match found for rate rule [Cds-CsH_Cds-HH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['C2H4(19)(20)', 'CC([O])=CC=C[O](7453)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(0.000699084,'m^3/(mol*s)'), n=2.81599, Ea=(19.131,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [Cds-HH_Cds-HH;CsJ-TwoDeH] for rate rule [Cds-HH_Cds-HH;CsJ-CdCOH]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction7',
reactants = ['CH3(15)(16)', '[CH2]CC(C=O)C=C=O(8269)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(8.04e+06,'cm^3/(mol*s)'), n=1.68, Ea=(54.1828,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [Cdd_Od;CsJ-HHH] for rate rule [Ck_O;CsJ-HHH]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['C[CH]C(C=O)C=C(C)[O](8270)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(718000,'s^-1'), n=2.05, Ea=(151.879,'kJ/mol'), T0=(1,'K'), Tmin=(500,'K'), Tmax=(2000,'K'), comment="""From training reaction 147 used for R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeC
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['CC[C](C=O)C=C(C)[O](8271)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(4.59157e+08,'s^-1'), n=1.13, Ea=(121.336,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_Cs;C_rad_out_2H;Cs_H_out_noH]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['[CH2]CC(C=O)C=C([CH2])O(8272)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(5.4947e+07,'s^-1'), n=1.58167, Ea=(202.575,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS_2Cd;C_rad_out_2H;XH_out] for rate rule [R3H_SS_2Cd;C_rad_out_2H;O_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH2]CC([C]=C(C)O)C=O(8273)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(4.96975e+09,'s^-1'), n=0.933333, Ea=(150.345,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_DS;Cd_rad_out_Cs;XH_out] for rate rule [R3H_DS;Cd_rad_out_Cs;O_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['CCC([C]=C(C)[O])C=O(8274)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_RSS;Cd_rad_out;Cs_H_out] for rate rule [R4H_SSS;Cd_rad_out_Cd;Cs_H_out_2H]
Euclidian distance = 2.44948974278
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['CCC([C]=O)C=C(C)[O](8275)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(463.959,'s^-1'), n=2.50105, Ea=(76.0245,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_SSS;C_rad_out_2H;XH_out] for rate rule [R4H_SSS;C_rad_out_2H;CO_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction14',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['[CH2]C[C](C=O)C=C(C)O(8276)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(0.00181,'s^-1'), n=4.25, Ea=(37.9489,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H;Y_rad_out;Cs_H_out_OneDe] for rate rule [R4H_SDS;O_rad_out;Cs_H_out_CO]
Euclidian distance = 2.44948974278
family: intra_H_migration"""),
)
reaction(
label = 'reaction15',
reactants = ['[CH2][CH]C(C=O)C=C(C)O(8277)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(722272,'s^-1'), n=1.6737, Ea=(94.6126,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SSMS;Y_rad_out;XH_out] for rate rule [R5H_SSMS;Y_rad_out;O_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction16',
reactants = ['[CH2]CC([C]=O)C=C(C)O(8278)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(722272,'s^-1'), n=1.6737, Ea=(94.6126,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SSMS;Y_rad_out;XH_out] for rate rule [R5H_SSMS;CO_rad_out;O_H_out]
Euclidian distance = 1.41421356237
family: intra_H_migration"""),
)
reaction(
label = 'reaction17',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['[CH2]C([O])=CC(C=O)CC(8279)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(8010,'s^-1'), n=1.94, Ea=(55.6472,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 93 used for R6H_RSSMS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R6H_RSSMS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction18',
reactants = ['C2H4(T)(890)', 'CC([O])=CC=C[O](7453)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(5.07337e+06,'m^3/(mol*s)'), n=-0.0618178, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;C_rad/H/TwoDe]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination
Ea raised from -8.9 to 0 kJ/mol."""),
)
reaction(
label = 'reaction19',
reactants = ['[CH]=C(C)[O](3732)', '[CH2]C[CH]C=O(6110)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(1.59671e+07,'m^3/(mol*s)'), n=0.0113737, Ea=(2.96199,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [C_rad/H/OneDeC;Y_rad] for rate rule [C_rad/H/OneDeC;Cd_rad]
Euclidian distance = 1.0
family: R_Recombination"""),
)
reaction(
label = 'reaction20',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['[CH2]CC(C=O)C1O[C]1C(8280)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(3.473e+12,'s^-1'), n=0.247, Ea=(231.216,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3_D;doublebond_intra_secNd;radadd_intra] for rate rule [R3_D;doublebond_intra_secNd_HNd;radadd_intra_O]
Euclidian distance = 1.41421356237
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction21',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['CC1([O])[CH]C(C=O)CC1(8281)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(1.89094e+07,'s^-1'), n=0.979167, Ea=(71.8796,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R5_CsCs_RH_D;doublebond_intra_pri;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic
Ea raised from 67.0 to 71.9 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction22',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['CC([O])=CC1[CH]OCC1(8152)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(2.64784e+07,'s^-1'), n=0.990488, Ea=(37.8683,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5_SS;multiplebond_intra;radadd_intra_cs2H] for rate rule [R5_SS_CO;carbonyl_intra_H;radadd_intra_cs2H]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction23',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['[CH2]CC1[CH]OOC(C)=C1(8282)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(2.39072e+10,'s^-1'), n=0.346137, Ea=(317.892,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6_SMS;multiplebond_intra;radadd_intra] for rate rule [R6_SMS_CO;carbonyl_intra_H;radadd_intra_O]
Euclidian distance = 2.44948974278
family: Intra_R_Add_Endocyclic
Ea raised from 314.7 to 317.9 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction24',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['C=CC(C=O)C=C(C)O(8283)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(4.25221e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radExo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction25',
reactants = ['CH2(S)(21)(22)', '[CH2]CC(C=O)C=C[O](8284)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(71881.9,'m^3/(mol*s)'), n=0.444, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [carbene;R_H]
Euclidian distance = 0
family: 1,2_Insertion_carbene
Ea raised from -5.1 to 0 kJ/mol."""),
)
reaction(
label = 'reaction26',
reactants = ['CO(10)(11)', '[CH2]CCC=C(C)[O](8285)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(1.532e+06,'cm^3/(mol*s)'), n=2.07, Ea=(343.925,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [CO;C_sec] for rate rule [CO;C/H2/OneDeC]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: 1,2_Insertion_CO"""),
)
reaction(
label = 'reaction27',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['CC1=CC(C=O)CCO1(8286)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(2.53377e+11,'s^-1'), n=0.0685, Ea=(8.20064,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R6;C_rad_out_2H;Ypri_rad_out] + [R6_SSSDS;C_rad_out_single;Ypri_rad_out] for rate rule [R6_SSSDS;C_rad_out_2H;Opri_rad]
Euclidian distance = 1.41421356237
family: Birad_recombination"""),
)
reaction(
label = 'reaction28',
reactants = ['[CH2]CO[CH]C=CC(C)=O(8132)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(7040,'s^-1'), n=2.66, Ea=(313.8,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R_ROR;R1_doublebond;R2_doublebond_H;R_O_C] for rate rule [R_ROR;R1_doublebond_CHR;R2_doublebond_H;R_O_C]
Euclidian distance = 1.0
family: ketoenol"""),
)
reaction(
label = 'reaction29',
reactants = ['[CH2]CC=COC=C(C)[O](8287)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(7040,'s^-1'), n=2.66, Ea=(313.8,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R_ROR;R1_doublebond;R2_doublebond_H;R_O_C] for rate rule [R_ROR;R1_doublebond_CHR;R2_doublebond_H;R_O_C]
Euclidian distance = 1.0
family: ketoenol"""),
)
reaction(
label = 'reaction30',
reactants = ['[CH2]CC(C=C(C)[O])=CO(8288)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(605.045,'s^-1'), n=2.96, Ea=(143.867,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R_ROR;R1_doublebond;R2_doublebond_H;R_O_H]
Euclidian distance = 0
family: ketoenol"""),
)
reaction(
label = 'reaction31',
reactants = ['CH2(17)(18)', '[CH2]C(C=O)C=C(C)[O](8289)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS31',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/Cs;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction32',
reactants = ['O(4)(4)', '[CH2]CC(C=O)C=[C]C(8290)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS32',
kinetics = Arrhenius(A=(2085.55,'m^3/(mol*s)'), n=1.09077, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [Y_rad;O_birad] for rate rule [Cd_rad/NonDe;O_birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -8.3 to 0 kJ/mol."""),
)
reaction(
label = 'reaction33',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['[CH2]CC1C([O])C1C(C)=O(8291)'],
transitionState = 'TS33',
kinetics = Arrhenius(A=(2.90568e+10,'s^-1'), n=0.237, Ea=(142.706,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_S;multiplebond_intra;radadd_intra_csHDe] for rate rule [R4_S_CO;carbonylbond_intra_H;radadd_intra_csHDe]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Exocyclic
Ea raised from 140.2 to 142.7 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction34',
reactants = ['H(3)(3)', '[CH2]CC(C=O)=CC(C)=O(8292)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS34',
kinetics = Arrhenius(A=(72.3521,'m^3/(mol*s)'), n=1.66655, Ea=(10.8198,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds-OneDeCs_Cds;HJ] for rate rule [Cds-COCs_Cds;HJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction35',
reactants = ['S(247)(246)', 'C2H4(T)(890)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS35',
kinetics = Arrhenius(A=(3.53019,'m^3/(mol*s)'), n=1.97633, Ea=(9.23266,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Cds-OneDeH_Cds;CJ] + [Cds-COH_Cds;YJ] for rate rule [Cds-COH_Cds;CJ]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction36',
reactants = ['HCO(14)(15)', '[CH2]CC=CC(C)=O(8293)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS36',
kinetics = Arrhenius(A=(0.00329883,'m^3/(mol*s)'), n=2.49742, Ea=(4.73961,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds-CsH_Cds-OneDeH;CJ] for rate rule [Cds-CsH_Cds-COH;CO_pri_rad]
Euclidian distance = 2.2360679775
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction37',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['[CH2]C[C](C=O)CC(C)=O(8294)'],
transitionState = 'TS37',
kinetics = Arrhenius(A=(2.76654e+08,'s^-1'), n=1.40703, Ea=(131.695,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R2H_S;C_rad_out_H/OneDe;Cs_H_out_OneDe] for rate rule [R2H_S;C_rad_out_H/OneDe;Cs_H_out_CO]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction38',
reactants = ['[CH2][CH]C(C=O)CC(C)=O(8295)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS38',
kinetics = Arrhenius(A=(1.29711e+07,'s^-1'), n=1.52333, Ea=(124.544,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS_Cs;Y_rad_out;Cs_H_out_H/OneDe] for rate rule [R3H_SS_Cs;Y_rad_out;Cs_H_out_H/CO]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction39',
reactants = ['[CH2]CC([C]=O)CC(C)=O(8296)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS39',
kinetics = Arrhenius(A=(1.29711e+07,'s^-1'), n=1.52333, Ea=(124.544,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS_Cs;Y_rad_out;Cs_H_out_H/OneDe] for rate rule [R3H_SS_Cs;CO_rad_out;Cs_H_out_H/CO]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction40',
reactants = ['[CH2]CC(C=O)CC([CH2])=O(8297)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS40',
kinetics = Arrhenius(A=(1.064e+06,'s^-1'), n=1.93, Ea=(141.419,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS;C_rad_out_2H;Cs_H_out_H/(NonDeC/Cs)]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction41',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['[CH2]CC1[CH]OC1C(C)=O(8298)'],
transitionState = 'TS41',
kinetics = Arrhenius(A=(6.89861e+07,'s^-1'), n=1.13751, Ea=(126.759,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_S;multiplebond_intra;radadd_intra_cs] for rate rule [R4_S_CO;carbonyl_intra_H;radadd_intra_csHCO]
Euclidian distance = 3.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction42',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['C[C]1[CH]C(C=O)CCO1(8299)'],
transitionState = 'TS42',
kinetics = Arrhenius(A=(5.8912e+08,'s^-1'), n=0.529986, Ea=(88.0823,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6_linear;multiplebond_intra;radadd_intra_cs2H] for rate rule [R6_linear;carbonyl_intra_Nd;radadd_intra_cs2H]
Euclidian distance = 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction43',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['CCC(C=O)=CC(C)=O(8300)'],
transitionState = 'TS43',
kinetics = Arrhenius(A=(2.00399e+09,'s^-1'), n=0.37, Ea=(78.2471,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad;XH_Rrad_De] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad;XH_Rrad_De]
Euclidian distance = 1.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction44',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['C=CC(C=O)CC(C)=O(8301)'],
transitionState = 'TS44',
kinetics = Arrhenius(A=(5.2748e+09,'s^-1'), n=0.37, Ea=(88.9686,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3;Y_rad_De;XH_Rrad] + [R3radExo;Y_rad;XH_Rrad] for rate rule [R3radExo;Y_rad_De;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction46',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['[CH2]CC([CH]C=O)C(C)=O(8131)'],
transitionState = 'TS45',
kinetics = Arrhenius(A=(3.66547e+10,'s^-1'), n=0.732, Ea=(169.355,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [cCs(-HC)CJ;CsJ-OneDeH;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction46',
reactants = ['[CH2]C[CH]C(C=O)C(C)=O(8239)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS46',
kinetics = Arrhenius(A=(8.889e+11,'s^-1'), n=0.232, Ea=(122.75,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;CO] for rate rule [cCs(-HC)CJ;CsJ-CsH;CO]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction47',
reactants = ['[CH2]CC(C=O)C(C)[C]=O(8302)'],
products = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
transitionState = 'TS47',
kinetics = Arrhenius(A=(5.59192e+09,'s^-1'), n=1.025, Ea=(194.765,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [cCs(-HC)CJ;CJ;CH3]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction48',
reactants = ['[CH2]CC([CH]C(C)=O)C=O(8130)'],
products = ['CC(=O)C1CCC1C=O(8133)'],
transitionState = 'TS48',
kinetics = Arrhenius(A=(1.8e+12,'s^-1'), n=-0.1525, Ea=(7.90776,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Rn;C_rad_out_H/OneDe;Cpri_rad_out_2H] + [R4_SSS;C_rad_out_single;Cpri_rad_out_2H] for rate rule [R4_SSS;C_rad_out_H/OneDe;Cpri_rad_out_2H]
Euclidian distance = 2.0
family: Birad_recombination"""),
)
network(
label = '742',
isomers = [
'[CH2]CC([CH]C(C)=O)C=O(8130)',
],
reactants = [
('C2H4(19)(20)', 'S(247)(246)'),
],
bathGas = {
'Ne': 0.333333,
'N2': 0.333333,
'Ar(8)': 0.333333,
},
)
pressureDependence(
label = '742',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
| [
"qin.she@husky.neu.edu"
] | qin.she@husky.neu.edu |
2686c10d717de9e0a0a843214f7f55cdae879bb8 | 937313d90f1cc8c0b4a6fa90f8ba33a9e31be71d | /events/contribution/doctype/contribution_item/contribution_item.py | 93d16ff6ec5d539d02ea947fa828b57b78ed84ab | [
"MIT"
] | permissive | bobzz-zone/korecent_gias | a7dafe2eaeecbd53f4a060e72098c5bfbba67bd5 | 4d456c6a4455b247cd6710f55bd7ebd30a615093 | refs/heads/master | 2021-01-20T05:19:03.577088 | 2017-08-28T13:07:21 | 2017-08-28T13:07:21 | 101,425,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, bobzz.zone@gmail.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ContributionItem(Document):
	"""Frappe document controller for the Contribution Item doctype.

	No custom server-side behavior: all fields and validation come from
	the doctype definition (contribution_item.json).
	"""
	pass
| [
"bobzz.zone@gmail.com"
] | bobzz.zone@gmail.com |
5ce633162ef77335b572b7931a14ef3f1d121526 | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/primaires/scripting/actions/changer_stat.py | 9be53ad6a93dcc43d22bf3b07946e41a7ff1d81f | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,194 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action changer_stat."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):

    """Change la stat d'un personnage."""

    # NOTE(review): the French docstrings on this class look like user-facing
    # help text rendered by the scripting engine (they document in-game call
    # syntax), so they are deliberately left untranslated — confirm before
    # converting them to English.

    @classmethod
    def init_types(cls):
        # Register the action signature with the scripting type system:
        # changer_stat(Personnage, str, Fraction).
        cls.ajouter_types(cls.changer_stat, "Personnage", "str", "Fraction")

    @staticmethod
    def changer_stat(personnage, nom_stat, valeur):
        """Modifie la stat d'un personnage.
        Cette action doit être de préférence utilisée pour augmenter
        les stats d'un personnage. Pour consommer des stats particulières
        (comme la vitalité ou l'endurance), utilisez l'action
        'consommer'.
        Paramètres à préciser :
          * personnage : le personnage dont on veut modifier la stat
          * nom_stat : le nom de la stat
          * valeur : la valeur de la nouvelle stat
        Exemples d'utilisation :
          changer_stat personnage "force" 90
          changer_stat personnage "vitalite_max" 2000
          changer_stat "mana" 80
        Note : n'utilisez pas cette action pour tuer un personnage
        (utilisez l'action 'tuer' pour ce faire).
        Si vous voulez modifier la stat max d'un personnage, utilisez
        "stat_max" (par exemple "vitalite_max"). Notez que les noms
        des stats sont en minuscule et sans accent.
        """
        # Coerce the scripting Fraction to an int and clamp to a minimum
        # of 1: a stat value of zero or less is never stored.
        valeur = int(valeur)
        if valeur <= 0:
            valeur = 1
        # Unknown stat names surface as a scripting-level execution error.
        if nom_stat not in personnage.stats:
            raise ErreurExecution("stat {} inconnue".format(repr(nom_stat)))
        setattr(personnage.stats, nom_stat, valeur)
| [
"stormi@laposte.net"
] | stormi@laposte.net |
5d1cfecb8fabf44a4d43ec619892469e0003e314 | 066e874cc6d72d82e098d81a220cbbb1d66948f7 | /migrations/versions/c7cdcf7845d7_.py | d523e12da43e9723bcb82b19f484e9cd93d3a248 | [] | no_license | webdeveloper001/flask-inboundlead | 776792485a998a0eaa4b14016c3a2066e75ff2a2 | d0a539d86342e9efc54d0c0a1adc02c609f0f762 | refs/heads/master | 2021-01-19T01:34:55.241144 | 2017-04-05T00:42:03 | 2017-04-05T00:42:03 | 87,248,885 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | """empty message
Revision ID: c7cdcf7845d7
Revises: d2d6e1b48e4b
Create Date: 2017-03-06 17:09:02.886878
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c7cdcf7845d7'
down_revision = 'd2d6e1b48e4b'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``spreadSheetId`` varchar(120) column to ``sales_rep``."""
    spreadsheet_column = sa.Column('spreadSheetId', sa.String(length=120), nullable=True)
    op.add_column('sales_rep', spreadsheet_column)
def downgrade():
    """Remove the ``spreadSheetId`` column from ``sales_rep``, undoing this revision."""
    op.drop_column('sales_rep', 'spreadSheetId')
| [
"jamesbrown1018@outlook.com"
] | jamesbrown1018@outlook.com |
9541bbb6b0bf90c84b9acd9ea52f6d18e8fd1ecf | 317feb2f3982a4c6aeed9ef32be7cec2eafad0db | /webapp_project/webapp_project/wsgi.py | 00cf53035533ee012b60803f3250f77a43bfb00c | [
"MIT"
] | permissive | r-singh/Test2 | 36be6dc13e97dc92b6f7f3bab6852bdb1755c2df | 4ebdf533a974be99d7f85f27fe2b03c8214f4307 | refs/heads/master | 2020-06-04T01:02:53.945429 | 2014-04-15T22:10:57 | 2014-04-15T22:10:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | """
WSGI config for webapp_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
# Directory two levels above this file (the outer project directory), added
# to sys.path so project packages import regardless of the working directory.
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
# setdefault: an externally provided DJANGO_SETTINGS_MODULE takes precedence
# over the production default below.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webapp_project.settings.production")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"vagrant@localhost.localdomain"
] | vagrant@localhost.localdomain |
4993d2d4cbdccdb0a704ee094ee77a8004d43aea | 460b4ec7a8e9332567bae637797c3cf29619d651 | /tasks/__init__.py | 932ca98768354ea6e883ea008dff684e65fa1e55 | [] | no_license | agajews/tfbrain-v2 | e1a79d805007913afb91dd60c73bdf64beba4122 | 86f595bf61d41f70af2bcc6ac3cd6abd6aa0615f | refs/heads/master | 2021-01-13T09:10:02.249595 | 2016-09-26T13:04:02 | 2016-09-26T13:04:02 | 69,250,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | from .conf import *
from .helpers import *
from .rl import *
from .async_rl import *
# from .baxter import *
| [
"agajews@gmail.com"
] | agajews@gmail.com |
699cf439264c0a513273c23ce5fd10abd352b4dc | de9b8b7192a0a81e9249823bb2b86f0b7e452863 | /.history/classes/Season_20171106231139.py | 014af206fdc7b7ef79d3e86456871c59ec64a42e | [
"MIT"
] | permissive | reecebenson/uwe-dadsa-tennis-a | f5eaeb1b96d4e61f29279514e68eeea8ad6533db | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | refs/heads/master | 2023-07-08T16:13:23.963348 | 2017-11-30T12:07:01 | 2017-11-30T12:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | # DADSA - Assignment 1
# Reece Benson
from classes import Player as Player
class Season():
    """One tournament season: its players, rounds and settings.

    Bug fix: the mutable containers (``_players``, ``_rounds``,
    ``_rounds_raw``) used to be class attributes, so every ``Season``
    instance shared — and mutated — the same dicts.  They are now
    created per instance in ``__init__``.
    """

    def __init__(self, _app, name, j_data):
        """Build a season from its JSON data.

        _app   -- the owning application (read for its ``debug`` flag)
        name   -- display name of the season
        j_data -- parsed JSON for the season; must contain a 'settings' key
        """
        # Keep references to the application and the raw JSON data.
        self._app = _app
        self._j_data = j_data

        # Debug trace when the application runs in debug mode.
        if self._app.debug:
            print("[LOAD]: Loaded Season '{0}'".format(name))

        self._name = name
        self._settings = j_data['settings']

        # Fresh, per-instance containers (previously shared class-level
        # dicts — see class docstring).
        self._players = { }
        self._rounds = { }
        self._rounds_raw = { }

    def name(self):
        """Return the season's display name."""
        return self._name

    def settings(self):
        """Return the season's settings dict."""
        return self._settings

    def players(self):
        """Return the players, keyed by gender category."""
        return self._players

    def add_player(self, name, gender):
        """Create a Player and file it under its gender category."""
        if gender not in self.players():
            self._players[gender] = [ ]
        # The player's index within its gender list doubles as its id.
        self._players[gender].append(
            Player.Player(name, gender, len(self.players()[gender])))

    def rounds(self):
        """Return the processed rounds."""
        return self._rounds

    def set_rounds(self):
        # NOTE(review): work in progress — currently only dumps the raw
        # round data; confirm the intended processing before relying on it.
        for gender in self._rounds_raw:
            for rnd in self._rounds_raw[gender]:
                print(rnd)

    def set_rounds_raw(self, rounds):
        """Store the raw round data and (re)build the rounds from it."""
        self._rounds_raw = rounds
        self.set_rounds()
| [
"me@reecebenson.me"
] | me@reecebenson.me |
9722073d175f94f7dab389b73191271fca8bf56a | c9dc1df17ecb9e279eb4403b83358363cdbe7fee | /project/cms/migrations/0042_robotsfile.py | 6d6177838930114ae197e834346653b665c93c69 | [] | no_license | m0nte-cr1st0/keyua | c3894a94c9bfe73409078be11cb1d3f64831054c | b964ebb7e260fbebdbc27e3a571fed6278196cac | refs/heads/master | 2022-11-25T16:03:51.882386 | 2020-01-09T12:57:54 | 2020-01-09T12:57:54 | 232,809,529 | 0 | 0 | null | 2022-11-22T02:24:49 | 2020-01-09T12:58:10 | Python | UTF-8 | Python | false | false | 524 | py | # Generated by Django 2.0 on 2018-05-31 05:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0041_page_show_on_sitemap'),
]
operations = [
migrations.CreateModel(
name='RobotsFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(blank=True, null=True)),
],
),
]
| [
"dinamo.mutu111@gmail.com"
] | dinamo.mutu111@gmail.com |
8d1cdff2c9acddbe3598defdb2d60850a9dcf1f5 | 747febe786dd6b7fd6c63cfe73dbe3023354daa8 | /src/the_tale/the_tale/game/heroes/tests/test_position.py | f1979acb4fda6888d34a210e24e138b5f85f4429 | [
"BSD-3-Clause"
] | permissive | the-tale/the-tale | 4e4b8d91dc873a5fb935fe58e9721a877baa6d3f | e8450bd2332344da805b1851e728da5a3e5bf0ef | refs/heads/develop | 2023-08-01T13:53:46.835667 | 2022-12-25T18:04:56 | 2022-12-25T18:04:56 | 1,949,167 | 98 | 52 | BSD-3-Clause | 2023-02-15T18:57:33 | 2011-06-24T18:49:48 | Python | UTF-8 | Python | false | false | 2,558 | py |
import smart_imports
smart_imports.all()
class HeroPositionTest(utils_testcase.TestCase):
    """Unit tests for the hero position component (placement and visit checks)."""

    def setUp(self):
        super().setUp()

        # Build the standard three-place test map before creating accounts.
        self.place_1, self.place_2, self.place_3 = game_logic.create_test_map()

        account = self.accounts_factory.create_account(is_fast=True)

        # Load the account into a fresh logic storage to obtain its hero.
        self.storage = game_logic_storage.LogicStorage()
        self.storage.load_account_data(account.id)
        self.hero = self.storage.accounts_to_heroes[account.id]

        # Roads between consecutive places; shared fixtures, not referenced
        # by the tests below.
        self.road_1_2 = roads_logic.road_between_places(self.place_1, self.place_2)
        self.road_2_3 = roads_logic.road_between_places(self.place_2, self.place_3)

    def test_initialize(self):
        # A new hero is bound to a place, sits exactly on its coordinates,
        # and carries no accumulated movement delta.
        self.assertNotEqual(self.hero.position.place_id, None)
        self.assertEqual(self.hero.position.x, self.hero.position.place.x)
        self.assertEqual(self.hero.position.y, self.hero.position.place.y)
        self.assertEqual(self.hero.position.dx, 0)
        self.assertEqual(self.hero.position.dy, 0)
        self.assertFalse(self.hero.position.moved_out_place)

    def test_set_position(self):
        old_position = copy.deepcopy(self.hero.position)

        self.hero.position.set_position(x=self.hero.position.x + 0.2,
                                        y=self.hero.position.y - 0.7)

        # Leaving the place's exact coordinates detaches the hero from it.
        self.assertEqual(self.hero.position.place_id, None)
        self.assertEqual(self.hero.position.x, old_position.x + 0.2)
        self.assertEqual(self.hero.position.y, old_position.y - 0.7)
        # Cells are integer map coordinates: -0.7 on y crosses into the
        # neighbouring cell while +0.2 on x stays within the same one.
        self.assertEqual(self.hero.position.cell_x, old_position.cell_x)
        self.assertEqual(self.hero.position.cell_y, old_position.cell_y - 1)

        # A second move keeps the hero detached from any place.
        self.hero.position.set_position(x=self.hero.position.x + 0.2,
                                        y=self.hero.position.y - 0.7)
        self.assertEqual(self.hero.position.place_id, None)

    def test_can_visit_current_place__in_place(self):
        # Offset (0.4, -0.2) from the place: only the largest delta allows a
        # visit -- presumably delta must cover the offset; TODO confirm
        # whether the check is per-axis or euclidean.
        pos = position.Position.create(place=self.place_1)
        pos.set_position(x=pos.x + 0.4, y=pos.y - 0.2)
        self.assertFalse(pos.can_visit_current_place(delta=0.1))
        self.assertFalse(pos.can_visit_current_place(delta=0.3))
        self.assertTrue(pos.can_visit_current_place(delta=0.45))

    def test_can_visit_current_place__out_place(self):
        # More than a whole cell away (1.4 on x): no tested delta permits
        # visiting the place.
        pos = position.Position.create(place=self.place_1)
        pos.set_position(x=pos.x + 1.4, y=pos.y - 0.2)
        self.assertFalse(pos.can_visit_current_place(delta=0.1))
        self.assertFalse(pos.can_visit_current_place(delta=0.3))
        self.assertFalse(pos.can_visit_current_place(delta=0.45))
self.assertFalse(pos.can_visit_current_place(delta=0.45))
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
9ba48e21a5bb920ffd38c0a2f663d53bb638c96b | 658ab464e9c796f819ad85f569ad06ab6e66992e | /src/fun_advance/lambda_do.py | d3dc0c4582fc3a73521a52be349200ff33ca651a | [] | no_license | huowolf/python-demo | 03e5731ba632caada819dd70d0f9dc07c98308a1 | e3b80dcc0e0bc2437a0b2882e17563c8171460a2 | refs/heads/master | 2020-03-23T22:00:57.515258 | 2018-09-07T15:33:22 | 2018-09-07T15:33:22 | 142,147,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | #lambda x: x * x
#关键字lambda表示匿名函数,冒号前面的x表示函数参数
def build(x,y):
return lambda: x * x + y * y
ret=build(3, 4)
print(ret()) | [
"274956285@qq.com"
] | 274956285@qq.com |
d2d4a2b1dfa9689dfae43aaf9826ae9ce6d9ec9e | 14421a12c4e80395567e676394d369fd9619bd32 | /Scripts/PythonMidLvl/18a.py | f086632f4babe33a1d49b02b67f075a425925d86 | [] | no_license | jawor92/Python-Udemy-Mobilo | 7b331e8197233c3116e43e0b3c1110b9b878762e | 8098508835121a1536c2753bc4eedbf17163c93d | refs/heads/master | 2020-12-09T21:39:09.366604 | 2020-01-12T19:31:09 | 2020-01-12T19:31:09 | 233,423,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 10 12:59:09 2019
@author: Mateusz.Jaworski
"""
# Commands to vet, in order; an 'abort' command wipes the approved list.
instruction = ['hello', 'borrow money', 'thanks',' bye']
instructionaproved = []
for command in instruction:
    instructionaproved.append(command)
    print("Instructions: ", command)
    if command != 'abort':
        # Normal command: show the cumulative approved list and carry on.
        print('List of instructions approved: ', instructionaproved)
        continue
    # 'abort' discards everything approved so far and stops processing.
    print('Aborting!')
    instructionaproved.clear()
    break
| [
"jaworski92@gmail.com"
] | jaworski92@gmail.com |
29c97db78034eada0c7a09ca9c148802244cb74a | 5ee028ee2582a2d566c22a32097a1fcbed314fcc | /openwsn-sw/software/openvisualizer/openvisualizer/openUI/OpenFrameState.py | e9875efea59a982a991f6aeb7e4cae664ae400df | [] | permissive | ssciancalepore/BitTransfer | 70c5b271743ebe683d7a3a37d595dbab132f903e | b9d343b0219259f4870e9362b99c27f544014b89 | refs/heads/master | 2022-06-20T18:38:03.271254 | 2019-09-15T04:56:32 | 2019-09-15T04:56:32 | 199,583,953 | 1 | 1 | BSD-3-Clause | 2022-06-03T22:45:01 | 2019-07-30T05:53:29 | C | UTF-8 | Python | false | false | 3,641 | py | import json
import OpenFrame
import OpenTable
import OpenGuiLib
class OpenFrameState(OpenFrame.OpenFrame):
def __init__(self,guiParent,width=None,height=None,frameName="frame",row=0,column=0,columnspan=1):
# store params
self.guiParent = guiParent
self.frameName = frameName
self.row = row
self.column = column
# initialize the parent class
OpenFrame.OpenFrame.__init__(self,guiParent,
width=width,
height=height,
frameName=frameName,
row=row,
column=column,
columnspan=columnspan,)
# local variables
self.updatePeriod = None
temp = OpenGuiLib.HeaderLabel(self.container,text="data")
#temp.grid(row=0,column=0)
self.data = OpenTable.OpenTable(self.container)
self.data.grid(row=1,column=0)
temp = OpenGuiLib.HeaderLabel(self.container,text="meta")
#temp.grid(row=2,column=0)
self.meta = OpenTable.OpenTable(self.container)
#self.meta.grid(row=3,column=0)
#======================== public ==========================================
def startAutoUpdate(self,updatePeriod,updateFunc,updateParams):
self.updatePeriod = updatePeriod
self.updateFunc = updateFunc
self.updateParams = updateParams
self.after(self.updatePeriod,self._cb_autoUpdate)
def stopAutoUpdate(self):
self.updatePeriod = None
def update(self,dataAndMeta):
assert(isinstance(dataAndMeta,dict))
assert('meta' in dataAndMeta)
assert(isinstance(dataAndMeta['meta'],list))
assert('data' in dataAndMeta)
assert(isinstance(dataAndMeta['data'],list))
if len(dataAndMeta['meta'])>0 and ('columnOrder' in dataAndMeta['meta'][0]):
self.data.update(dataAndMeta['data'],columnOrder=dataAndMeta['meta'][0]['columnOrder'].split('.'))
else:
self.data.update(dataAndMeta['data'])
self.meta.update(dataAndMeta['meta'])
#======================== private =========================================
def _cb_autoUpdate(self):
self.update(json.loads(self.updateFunc(*self.updateParams).toJson()))
if self.updatePeriod:
self.after(self.updatePeriod,self._cb_autoUpdate)
###############################################################################
if __name__=='__main__':
import OpenWindow
examplewindow = OpenWindow.OpenWindow("OpenFrameState")
exampleframestate = OpenFrameState(examplewindow,
frameName='exampleframestate',
row=0,
column=0)
exampleframestate.show()
exampleframestate.update(
{
'data': [
{
'data1': 'dA1',
'data2': 'dA2',
'data3': 'dA3',
},
],
'meta': [
{
'meta1': 'm1',
'meta2': 'm2',
},
],
}
)
examplewindow.startGui()
| [
"savio.sciancalepore@gmail.com"
] | savio.sciancalepore@gmail.com |
59d5b15c9732b600a0ca31557ae1b2819802a2ea | ef3a90b7c27ebf82d50deeb83a05342a812e6605 | /example-config.py | deffa805d58c9c720fbce17c25de7aebec4a269c | [
"MIT"
] | permissive | lun-4/catcord | f50d36fbeab94baaf6549e4218499e67a998680a | 44ea9a892ea169147a25a9c17280f8cc407304c0 | refs/heads/master | 2021-08-31T08:37:56.519112 | 2017-12-20T19:58:07 | 2017-12-20T19:58:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | token = 'asdasdasd'
| [
"lkmnds@gmail.com"
] | lkmnds@gmail.com |
326d4a7a256e8a3f83b1d53706bd8bf9f71ea751 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/tests/regressiontests/admin_scripts/simple_app/__init__.py | 8c134fbadb64e49649a8c6240ec0e7f5b85826f8 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/tests/regressiontests/admin_scripts/simple_app/__init__.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
18cede7516bf74c3d0b77d1cd896e9f8359e1fe0 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/raw/GL/WIN/specular_fog.py | 804cb34e65bdca76ce90df49a444c08976a1bb30 | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 563 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_WIN_specular_fog'
def _f( function ):
    # Bind *function* as a GL entry point of this extension, attaching the
    # standard OpenGL error checker (file is autogenerated -- see header).
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_WIN_specular_fog',error_checker=_errors._error_checker)
# Extension enum exposed by GL_WIN_specular_fog.
GL_FOG_SPECULAR_TEXTURE_WIN=_C('GL_FOG_SPECULAR_TEXTURE_WIN',0x80EC)
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
7a5b06736ee55b26c419191273ec7ee857c0319d | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /test/legacy_test/test_executor_feed_non_tensor.py | b7e1f02beb4b7512d45fb152d4ee8fe542d75cef | [
"Apache-2.0"
] | permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 7,326 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import paddle
from paddle import fluid
class TestExecutor(unittest.TestCase):
    """Feeding plain Python values (float/int/nested list) into
    Executor.run should be converted to tensors that keep the dtype of the
    corresponding placeholder (FP32 here)."""

    def net(self):
        """Build a one-FC linear-regression net whose learning rate is a
        feedable scalar placeholder; return (lr var, mean cost)."""
        lr = paddle.static.data(name="lr", shape=[], dtype='float32')
        x = paddle.static.data(name="x", shape=[None, 1], dtype='float32')
        y = paddle.static.data(name="y", shape=[None, 1], dtype='float32')
        y_predict = paddle.static.nn.fc(x, size=1)
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        opt = fluid.optimizer.Adam(learning_rate=lr)
        opt.minimize(avg_cost)
        return lr, avg_cost

    def test_program_feed_float(self):
        """Feed lr as a Python float; the fetched lr keeps FP32 dtype."""
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype(
                    'float32'
                )
                y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype(
                    'float32'
                )
                a = 0.01
                _lr, _ = exe.run(
                    feed={'x': train_data, 'y': y_true, 'lr': a},
                    fetch_list=[lr, cost],
                    return_numpy=False,
                )
        self.assertEqual(_lr._dtype(), lr.dtype)
        self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32)
        self.assertEqual(type(a), float)

    def test_program_feed_int(self):
        """Feed lr as a Python int; conversion still yields FP32."""
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype(
                    'float32'
                )
                y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype(
                    'float32'
                )
                a = 0
                _lr, _ = exe.run(
                    feed={'x': train_data, 'y': y_true, 'lr': a},
                    fetch_list=[lr, cost],
                    return_numpy=False,
                )
        self.assertEqual(_lr._dtype(), lr.dtype)
        self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32)
        self.assertEqual(type(a), int)

    def test_program_feed_list(self):
        """Feed x/y as nested Python lists instead of numpy arrays."""
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                train_data = [[1.0], [2.0], [3.0], [4.0]]
                y_true = [[2.0], [4.0], [6.0], [8.0]]
                a = 0
                _lr, _ = exe.run(
                    feed={'x': train_data, 'y': y_true, 'lr': a},
                    fetch_list=[lr, cost],
                    return_numpy=False,
                )
        self.assertEqual(_lr._dtype(), lr.dtype)
        self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32)
        self.assertEqual(type(y_true), list)

    def test_compiled_program_feed_scalar(self):
        """Same float-lr feed, but running through a CompiledProgram."""
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                lr, cost = self.net()
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                exe.run(startup_program)
                compiled_prog = fluid.CompiledProgram(main_program)
                train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype(
                    'float32'
                )
                y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype(
                    'float32'
                )
                a = 0.01
                _lr, _ = exe.run(
                    compiled_prog,
                    feed={'x': train_data, 'y': y_true, 'lr': a},
                    fetch_list=[lr, cost],
                    return_numpy=False,
                )
        self.assertEqual(_lr._dtype(), lr.dtype)
        self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32)
        self.assertEqual(type(a), float)
class TestAsLodTensor(unittest.TestCase):
    """Direct unit tests of the fluid.executor._as_lodtensor helper:
    scalars/lists/tuples convert with the requested dtype, missing dtype
    asserts, and unsupported container contents raise TypeError."""

    def test_as_lodtensor_int32(self):
        """A float scalar is converted to the requested INT32 dtype."""
        cpu = fluid.CPUPlace()
        tensor = fluid.executor._as_lodtensor(
            1.0, cpu, fluid.core.VarDesc.VarType.INT32
        )
        self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.INT32)

    def test_as_lodtensor_fp64(self):
        """An int scalar is converted to the requested FP64 dtype."""
        cpu = fluid.CPUPlace()
        tensor = fluid.executor._as_lodtensor(
            1, cpu, fluid.core.VarDesc.VarType.FP64
        )
        self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.FP64)

    def test_as_lodtensor_assertion_error(self):
        """Omitting the dtype argument triggers the helper's assertion."""
        cpu = fluid.CPUPlace()
        self.assertRaises(AssertionError, fluid.executor._as_lodtensor, 1, cpu)

    def test_as_lodtensor_type_error(self):
        """A dict is not a supported feed value."""
        cpu = fluid.CPUPlace()
        self.assertRaises(
            TypeError,
            fluid.executor._as_lodtensor,
            {"a": 1},
            cpu,
            fluid.core.VarDesc.VarType.INT32,
        )

    def test_as_lodtensor_list(self):
        """A flat list converts with the requested dtype."""
        cpu = fluid.CPUPlace()
        tensor = fluid.executor._as_lodtensor(
            [1, 2], cpu, fluid.core.VarDesc.VarType.FP64
        )
        self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.FP64)

    def test_as_lodtensor_tuple(self):
        """A tuple converts with the requested dtype."""
        cpu = fluid.CPUPlace()
        tensor = fluid.executor._as_lodtensor(
            (1, 2), cpu, fluid.core.VarDesc.VarType.FP64
        )
        self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.FP64)

    def test_as_lodtensor_nested_list(self):
        """Containers of sets are rejected with TypeError."""
        cpu = fluid.CPUPlace()
        self.assertRaises(
            TypeError,
            fluid.executor._as_lodtensor,
            [{1.2, 1.2}, {1, 2}],
            cpu,
            fluid.core.VarDesc.VarType.INT32,
        )
if __name__ == '__main__':
    # Allow running this module's tests directly, outside a test runner.
    unittest.main()
| [
"noreply@github.com"
] | Shixiaowei02.noreply@github.com |
fc72a72f740bdd02af58325a660fb79fa0e9702d | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /carbon/common/lib/cherrypy/lib/static.py | ac793bf377f74bccc26e8e52a83faa20b07527a9 | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,792 | py | #Embedded file name: carbon/common/lib/cherrypy/lib\static.py
import logging
import mimetypes
mimetypes.init()
mimetypes.types_map['.dwg'] = 'image/x-dwg'
mimetypes.types_map['.ico'] = 'image/x-icon'
mimetypes.types_map['.bz2'] = 'application/x-bzip2'
mimetypes.types_map['.gz'] = 'application/x-gzip'
import os
import re
import stat
import time
import cherrypy
from cherrypy._cpcompat import ntob, unquote
from cherrypy.lib import cptools, httputil, file_generator_limited
def serve_file(path, content_type = None, disposition = None, name = None, debug = False):
    """Set status, headers, and body in order to serve the given path.

    The Content-Type header will be set to the content_type arg, if provided.
    If not provided, the Content-Type will be guessed by the file extension
    of the 'path' argument.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, it will be set
    to the basename of path. If disposition is None, no Content-Disposition
    header will be written.

    Raises ValueError for a relative path and cherrypy.NotFound when the
    path is missing or is a directory.
    """
    response = cherrypy.serving.response

    # Only absolute filesystem paths are accepted -- a relative path here is
    # a configuration error, not a 404.
    if not os.path.isabs(path):
        msg = "'%s' is not an absolute path." % path
        if debug:
            cherrypy.log(msg, 'TOOLS.STATICFILE')
        raise ValueError(msg)

    try:
        st = os.stat(path)
    except OSError:
        if debug:
            cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Check if path is a directory.
    if stat.S_ISDIR(st.st_mode):
        # Let the caller deal with it as they like.
        if debug:
            cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Set the Last-Modified response header and let validate_since() raise
    # a 304 Not Modified if the client's cached copy is current.
    response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
    cptools.validate_since()

    if content_type is None:
        # Guess the Content-Type from the (lowercased) file extension.
        ext = ''
        i = path.rfind('.')
        if i != -1:
            ext = path[i:].lower()
        content_type = mimetypes.types_map.get(ext, None)
    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        # Default the download filename to the file's basename.
        if name is None:
            name = os.path.basename(path)
        cd = '%s; filename="%s"' % (disposition, name)
        response.headers['Content-Disposition'] = cd
        if debug:
            cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    content_length = st.st_size
    fileobj = open(path, 'rb')
    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def serve_fileobj(fileobj, content_type = None, disposition = None, name = None, debug = False):
    """Set status, headers, and body in order to serve the given file object.

    The Content-Type header will be set to the content_type arg, if provided.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, 'filename' will
    not be set. If disposition is None, no Content-Disposition header will
    be written.

    CAUTION: If the request contains a 'Range' header, one or more seek()s will
    be performed on the file object.  This may cause undesired behavior if
    the file object is not seekable.  It could also produce undesired results
    if the caller set the read position of the file object prior to calling
    serve_fileobj(), expecting that the data would be served starting from that
    position.
    """
    response = cherrypy.serving.response

    try:
        st = os.fstat(fileobj.fileno())
    except AttributeError:
        # Not a real file (e.g. StringIO/BytesIO): no size, no mtime, so no
        # Last-Modified header and no Content-Length.
        if debug:
            cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
        content_length = None
    else:
        # Set the Last-Modified response header; validate_since() may raise
        # a 304 Not Modified for a current client cache.
        response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
        cptools.validate_since()
        content_length = st.st_size

    if content_type is not None:
        response.headers['Content-Type'] = content_type
        if debug:
            cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        # Unlike serve_file(), there is no path to derive a filename from,
        # so omit it when no name was supplied.
        if name is None:
            cd = disposition
        else:
            cd = '%s; filename="%s"' % (disposition, name)
        response.headers['Content-Disposition'] = cd
        if debug:
            cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, content_type, content_length, debug = False):
    """Internal. Set response.body to the given file object, perhaps ranged.

    For HTTP/1.1 requests carrying a Range header, serves either a single
    206 partial body or a multipart/byteranges body.  Otherwise (or for
    HTTP/1.0) the whole file object becomes the response body.
    """
    response = cherrypy.serving.response
    request = cherrypy.serving.request

    # HTTP/1.0 didn't have Range support, so only advertise/honor it for 1.1+.
    if request.protocol >= (1, 1):
        response.headers['Accept-Ranges'] = 'bytes'
        r = httputil.get_ranges(request.headers.get('Range'), content_length)
        if r == []:
            # Range header present but unsatisfiable: per the spec, report
            # the full length and fail with 416.
            response.headers['Content-Range'] = 'bytes */%s' % content_length
            message = 'Invalid Range (first-byte-pos greater than Content-Length)'
            if debug:
                cherrypy.log(message, 'TOOLS.STATIC')
            raise cherrypy.HTTPError(416, message)

        if r:
            if len(r) == 1:
                # Single range: stream just that slice with a Content-Range.
                start, stop = r[0]
                if stop > content_length:
                    stop = content_length
                r_len = stop - start
                if debug:
                    cherrypy.log('Single part; start: %r, stop: %r' % (start, stop), 'TOOLS.STATIC')
                response.status = '206 Partial Content'
                response.headers['Content-Range'] = 'bytes %s-%s/%s' % (start, stop - 1, content_length)
                response.headers['Content-Length'] = r_len
                fileobj.seek(start)
                response.body = file_generator_limited(fileobj, r_len)
            else:
                # Multiple ranges: emit a multipart/byteranges body.
                response.status = '206 Partial Content'
                from mimetools import choose_boundary
                boundary = choose_boundary()
                ct = 'multipart/byteranges; boundary=%s' % boundary
                response.headers['Content-Type'] = ct
                if 'Content-Length' in response.headers:
                    # The overall multipart length is not known up front.
                    del response.headers['Content-Length']

                def file_ranges():
                    # Apache compat: start the body with a bare CRLF.
                    yield ntob('\r\n')
                    for start, stop in r:
                        if debug:
                            cherrypy.log('Multipart; start: %r, stop: %r' % (start, stop), 'TOOLS.STATIC')
                        yield ntob('--' + boundary, 'ascii')
                        yield ntob('\r\nContent-type: %s' % content_type, 'ascii')
                        yield ntob('\r\nContent-range: bytes %s-%s/%s\r\n\r\n' % (start, stop - 1, content_length), 'ascii')
                        fileobj.seek(start)
                        for chunk in file_generator_limited(fileobj, stop - start):
                            yield chunk
                        yield ntob('\r\n')
                    # Final boundary
                    yield ntob('--' + boundary + '--', 'ascii')
                    yield ntob('\r\n')
                response.body = file_ranges()
            # BUGFIX: return for *both* range branches.  Previously only the
            # multipart branch returned here, so a single-range request fell
            # through to the code below, which overwrote Content-Length with
            # the full file size and replaced the ranged body with the raw
            # (already-seeked) file object.
            return response.body

    if debug:
        cherrypy.log('No byteranges requested', 'TOOLS.STATIC')

    # No Range requested (or HTTP/1.0): serve the whole file object; CherryPy
    # streams it rather than loading it into memory.
    response.headers['Content-Length'] = content_length
    response.body = fileobj
    return response.body
def serve_download(path, name = None):
    """Serve 'path' as an application/x-download attachment."""
    # Force a browser download dialog by combining the generic download
    # MIME type with an 'attachment' disposition.
    return serve_file(path,
                      content_type='application/x-download',
                      disposition='attachment',
                      name=name)
def _attempt(filename, content_types, debug = False):
    """Try to serve *filename*; return True on success, False if missing.

    If *content_types* is given, the Content-Type is looked up there by the
    file's bare extension (without the leading dot); otherwise serve_file
    guesses it from the path.  cherrypy.NotFound is swallowed so callers can
    fall back (e.g. to an index file).
    """
    if debug:
        cherrypy.log('Attempting %r (content_types %r)' % (filename, content_types), 'TOOLS.STATICDIR')
    try:
        # you can set the content types for a
        # complete directory per extension
        content_type = None
        if content_types:
            ext = os.path.splitext(filename)[1]
            content_type = content_types.get(ext[1:], None)
        serve_file(filename, content_type=content_type, debug=debug)
        return True
    except cherrypy.NotFound:
        # If we didn't find the static file, continue handling the
        # request. We might find a dynamic handler instead.
        if debug:
            cherrypy.log('NotFound', 'TOOLS.STATICFILE')
        return False
def staticdir(section, dir, root = '', match = '', content_types = None, index = '', debug = False):
    """Serve a static resource from the given (root +) dir.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").

    index
        If provided, it should be the (relative) name of a file to
        serve for directory requests. For example, if the dir argument is
        '/home/me', the Request-URI is 'myapp', and the index arg is
        'index.html', the file '/home/me/myapp/index.html' will be sought.
    """
    request = cherrypy.serving.request

    # Static serving only makes sense for safe, body-less request methods.
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
        return False

    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' % (request.path_info, match), 'TOOLS.STATICDIR')
        return False

    # Allow the use of '~' to refer to a user's home directory.
    dir = os.path.expanduser(dir)

    # If dir is relative, make absolute using "root".
    if not os.path.isabs(dir):
        if not root:
            msg = 'Static dir requires an absolute dir (or root).'
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICDIR')
            raise ValueError(msg)
        dir = os.path.join(root, dir)

    # Determine where we are in the object tree relative to 'section'
    # (where the static tool was defined).
    if section == 'global':
        section = '/'
    section = section.rstrip('\\/')
    branch = request.path_info[len(section) + 1:]
    branch = unquote(branch.lstrip('\\/'))

    # Requested filename is the branch of path_info below the mount section.
    filename = os.path.join(dir, branch)
    if debug:
        cherrypy.log('Checking file %r to fulfill %r' % (filename, request.path_info), 'TOOLS.STATICDIR')

    # There's a chance that the branch pulled from the URL might
    # have ".." or similar uplevel attacks in it. Check that the final
    # filename is a child of dir.
    if not os.path.normpath(filename).startswith(os.path.normpath(dir)):
        raise cherrypy.HTTPError(403)  # Forbidden

    # BUGFIX: forward *debug* to _attempt (previously dropped here, while
    # staticfile() passed it through), so staticdir logs attempts too.
    handled = _attempt(filename, content_types, debug=debug)
    if not handled:
        # Check for an index file if a folder was requested.
        if index:
            handled = _attempt(os.path.join(filename, index), content_types, debug=debug)
            if handled:
                request.is_index = filename[-1] in '\\/'
    return handled
def staticfile(filename, root = None, match = '', content_types = None, debug = False):
    """Serve a static resource from the given (root +) filename.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").
    """
    req = cherrypy.serving.request

    # Guard clause: only GET/HEAD requests may be served statically.
    if req.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
        return False

    # Guard clause: honour the optional URL filter, if configured.
    if match and not re.search(match, req.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' % (req.path_info, match), 'TOOLS.STATICFILE')
        return False

    # Resolve a relative filename against *root*; a relative filename with
    # no root to anchor it is a configuration error.
    if not os.path.isabs(filename):
        if not root:
            msg = "Static tool requires an absolute filename (got '%s')." % filename
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICFILE')
            raise ValueError(msg)
        filename = os.path.join(root, filename)

    return _attempt(filename, content_types, debug=debug)
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
23c8af7aea382e4d3cfe4b8efe1bb5266fce4e09 | 5edd00b7d8f21fbd1cbb4a36722aba83f7f18656 | /test/12_dependency_versions/cibuildwheel_test.py | 55248bf20fc79a810d77636df181320684b887e3 | [
"MIT",
"BSD-2-Clause"
] | permissive | daleathan/cibuildwheel | ab24c8f3e597ba0948517430bd31935220e7b263 | 3e335044c61aa8da34d6fa904cae566cd1ce0808 | refs/heads/master | 2020-07-27T21:12:05.181999 | 2020-04-10T16:23:42 | 2020-04-10T16:23:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,266 | py | import os
import re
import pytest
import textwrap
import cibuildwheel.util
import utils
# Matches a pip constraint pin like "package-name==1.2.3".
VERSION_REGEX = r'([\w-]+)==([^\s]+)'


def get_versions_from_constraint_file(constraint_file):
    """Parse a pip constraints file into a {package: version} mapping.

    If a package is pinned more than once, the last pin wins.
    """
    with open(constraint_file, encoding='utf8') as f:
        pins = re.findall(VERSION_REGEX, f.read())
    # dict() keeps the last value for duplicate keys, matching a
    # line-by-line overwrite of the same mapping.
    return dict(pins)
@pytest.mark.parametrize('python_version', ['2.7', '3.5', '3.8'])
def test_pinned_versions(python_version):
    """Build with cibuildwheel's bundled constraint files and verify the
    build env saw exactly those pip/setuptools/wheel/virtualenv versions
    (the test project checks the EXPECTED_*_VERSION variables)."""
    if utils.platform == 'linux':
        pytest.skip('linux doesn\'t pin individual tool versions, it pins manylinux images instead')

    project_dir = os.path.dirname(__file__)

    build_environment = {}
    # Each interpreter generation has its own bundled constraints file.
    if python_version == '2.7':
        constraint_filename = 'constraints-python27.txt'
        build_pattern = '[cp]p27-*'
    elif python_version == '3.5':
        constraint_filename = 'constraints-python35.txt'
        build_pattern = '[cp]p35-*'
    else:
        constraint_filename = 'constraints.txt'
        build_pattern = '[cp]p38-*'

    # Read the expected versions straight out of the shipped constraints.
    constraint_file = os.path.join(cibuildwheel.util.resources_dir, constraint_filename)
    constraint_versions = get_versions_from_constraint_file(constraint_file)

    for package in ['pip', 'setuptools', 'wheel', 'virtualenv']:
        env_name = 'EXPECTED_{}_VERSION'.format(package.upper())
        build_environment[env_name] = constraint_versions[package]

    cibw_environment_option = ' '.join(
        ['{}={}'.format(k, v) for k, v in build_environment.items()]
    )

    # build and test the wheels
    actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
        'CIBW_BUILD': build_pattern,
        'CIBW_ENVIRONMENT': cibw_environment_option,
    })

    # also check that we got the right wheels
    if python_version == '2.7':
        expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
                           if '-cp27' in w or '-pp27' in w]
    elif python_version == '3.5':
        expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
                           if '-cp35' in w or '-pp35' in w]
    elif python_version == '3.8':
        expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
                           if '-cp38' in w or '-pp38' in w]
    else:
        raise ValueError('unhandled python version')

    assert set(actual_wheels) == set(expected_wheels)
@pytest.mark.parametrize('python_version', ['2.7', '3.x'])
def test_dependency_constraints_file(tmp_path, python_version):
    """Pin the build tools via a user-supplied constraints file and verify
    the build environment saw exactly those versions (the test project
    checks the EXPECTED_*_VERSION variables)."""
    if utils.platform == 'linux':
        pytest.skip('linux doesn\'t pin individual tool versions, it pins manylinux images instead')

    project_dir = os.path.dirname(__file__)

    # setuptools dropped Python 2 support after 44.x, hence the split.
    tool_versions = {
        'pip': '20.0.2',
        'setuptools': '44.0.0' if python_version == '2.7' else '46.0.0',
        'wheel': '0.34.2',
        'virtualenv': '20.0.10',
    }

    # Write the pins to a throwaway constraints file under tmp_path.
    constraints_file = tmp_path / 'constraints.txt'
    constraints_file.write_text(textwrap.dedent(
        '''
            pip=={pip}
            setuptools=={setuptools}
            wheel=={wheel}
            virtualenv=={virtualenv}
        '''.format(**tool_versions)
    ))

    cibw_environment_option = ' '.join(
        'EXPECTED_{}_VERSION={}'.format(package_name.upper(), version)
        for package_name, version in tool_versions.items()
    )

    # build and test the wheels
    actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
        'CIBW_BUILD': '[cp]p27-*' if python_version == '2.7' else '[cp]p3?-*',
        'CIBW_ENVIRONMENT': cibw_environment_option,
        'CIBW_DEPENDENCY_VERSIONS': str(constraints_file),
    })

    # also check that we got the right wheels
    all_wheels = utils.expected_wheels('spam', '0.1.0')
    if python_version == '2.7':
        expected_wheels = [w for w in all_wheels
                           if '-cp27' in w or '-pp27' in w]
    else:
        expected_wheels = [w for w in all_wheels
                           if '-cp27' not in w and '-pp27' not in w]

    assert set(actual_wheels) == set(expected_wheels)
| [
"joerick@mac.com"
] | joerick@mac.com |
fae414b730695848b9b5b0e7e6095ee04e0a4ff6 | e6e81d0cd02223ca27f2c3f544b3c116e7617270 | /LeetCodePremium/1123.lowest-common-ancestor-of-deepest-leaves.py | 5196cf0120d686be09c5e23c898b8fa6345ad720 | [] | no_license | ashjambhulkar/objectoriented | 86166640b0546713095dd5d8804fc78d31782662 | 6f07b50590ceef231be38d6d7b8c73a40c1152e9 | refs/heads/master | 2022-05-03T23:28:38.674275 | 2022-04-26T21:37:31 | 2022-04-26T21:37:31 | 249,091,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | #
# @lc app=leetcode id=1123 lang=python3
#
# [1123] Lowest Common Ancestor of Deepest Leaves
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def lcaDeepestLeaves(self, root: 'TreeNode') -> 'TreeNode':
        """Return the lowest common ancestor of the tree's deepest leaves.

        Bottom-up recursion: each subtree reports (height, lca-of-its-
        deepest-leaves).  If one child is strictly deeper, its LCA wins;
        when both sides reach the same depth, the current node covers all
        deepest leaves and becomes the LCA.  O(n) time, O(h) stack.

        The TreeNode annotations are quoted forward references: the
        TreeNode class itself only exists in the judge's environment (it is
        commented out above), so unquoted annotations would raise NameError
        at import time.
        """
        def helper(node):
            # Empty subtree: height 0, no LCA candidate.
            if not node:
                return 0, None
            left_height, left_lca = helper(node.left)
            right_height, right_lca = helper(node.right)
            if left_height > right_height:
                return left_height + 1, left_lca
            if right_height > left_height:
                return right_height + 1, right_lca
            # Equal depths: this node is the lowest ancestor of both sides'
            # deepest leaves.
            return left_height + 1, node

        return helper(root)[1]
# @lc code=end
| [
"ashjambhulkar@hotmail.com"
] | ashjambhulkar@hotmail.com |
f592e8dba7f277e3bda52fc030d724ef8cd02ce4 | 2fb7f13ec25781a49f0814d2010f9c090329e659 | /tests/test_builtins/uk/test_ukraine_spec.py | 4c946615b09bce648d28e031e487a321fa6d5292 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Shanmugapriya03/mimesis | a526769be2e35d297b7ec68c31f1072b6f51d64a | 649253fef05c6b5c362805000c1d7a99898aa0fe | refs/heads/master | 2020-08-06T03:58:49.276067 | 2019-10-04T06:42:57 | 2019-10-04T06:42:57 | 212,824,895 | 1 | 0 | MIT | 2019-10-04T13:46:56 | 2019-10-04T13:46:56 | null | UTF-8 | Python | false | false | 540 | py | import pytest
from mimesis.builtins import UkraineSpecProvider
from mimesis.enums import Gender
from mimesis.exceptions import NonEnumerableError
@pytest.fixture
def ukraine():
    """Provide a fresh UkraineSpecProvider instance for each test."""
    return UkraineSpecProvider()
@pytest.mark.parametrize(
    'gender', [
        Gender.FEMALE,
        Gender.MALE,
    ],
)
def test_patronymic(ukraine, gender):
    """patronymic() yields a non-empty string (>= 4 chars) for both genders
    and rejects values that are not Gender members."""
    result = ukraine.patronymic(gender=gender)
    assert result is not None
    assert len(result) >= 4

    # A non-Gender value must raise rather than silently default.
    with pytest.raises(NonEnumerableError):
        ukraine.patronymic(gender='nil')
| [
"likid.geimfari@gmail.com"
] | likid.geimfari@gmail.com |
78b6b35553aa4d72ed3306ef2898427322922902 | c8b2cc8965283e2d940c2cd5432c3fa682e3638c | /examples/terminal.py | a7a3d6e85e53adb1af152c49103556500a56cb06 | [
"MIT"
] | permissive | gitter-badger/DevAssist | 1ae87fc3505f0feb7657921377f65b167a0b39ff | be0b4221b2d379cf9e8c65454f1329d6fb910f0c | refs/heads/master | 2021-01-16T19:48:45.694776 | 2016-02-10T15:33:44 | 2016-02-10T15:33:44 | 51,457,297 | 0 | 0 | null | 2016-02-10T17:20:25 | 2016-02-10T17:20:25 | null | UTF-8 | Python | false | false | 391 | py | from DevAssist import DevAssist
my_devassist = DevAssist()
my_devassist.process("")
while True:
# @TODO: Normalize input from different versions of python
user_input = raw_input("Human: ")
# Leave if the user is done
if user_input == "quit":
exit(0)
# Generate response
response = my_devassist.process(user_input)
# Print response
print response
| [
"valetolpegin@gmail.com"
] | valetolpegin@gmail.com |
a942bedde48b5e6541bc1f2dc7af4a10448cbf40 | fc0eda8560a26c88b790d236070ed0559d0dc4a4 | /leetcode/basicDS08_heap/b02_lc347_top_k_frequent_elements.py | 21a0b6c42751ec427c3d9bec2556f59a52f81e92 | [] | no_license | pankypan/DataStructureAndAlgo | b4bd417a16cdb594bbed2ca0220dbd63eb60f3c1 | 6c5d40d57d378994236549f8dea906c75121eadf | refs/heads/master | 2021-08-03T01:22:08.442709 | 2021-07-19T14:56:44 | 2021-07-19T14:56:44 | 279,599,190 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | # basicDS05_heap https://leetcode-cn.com/problems/top-k-frequent-elements/
import heapq
from typing import List
class Solution:
@staticmethod
def get_num_statics(nums) -> dict:
hash_table = dict()
for num in nums:
if num in hash_table:
hash_table[num] += 1
else:
hash_table[num] = 1
return hash_table
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
hash_table = self.get_num_statics(nums)
priority_queue = list()
count = 0
for key, val in hash_table.items():
if count < k:
heapq.heappush(priority_queue, [val, key])
else:
if val > priority_queue[0][0]:
heapq.heappop(priority_queue)
heapq.heappush(priority_queue, [val, key])
count += 1
return [item_lis[1] for item_lis in priority_queue]
if __name__ == '__main__':
    # Quick manual check; the two most frequent of the first list are {1, 2},
    # and [1] is the only element of the second.
    s = Solution()
    print(s.topKFrequent([1, 1, 1, 2, 2, 3], 2))
    print(s.topKFrequent([1], 1))
| [
"1356523334@qq.com"
] | 1356523334@qq.com |
a25af68831ed484b843fa0dcc34da703e3117143 | adb280d422df64880debadd3b67a97f0c0869989 | /src/utils/logger.py | bad9ce3934a54795b6f9d74f01af27c3ae13835a | [
"Apache-2.0"
] | permissive | biothings/biothings_explorer_web_old | d924ff888a7c7e5a01c2e93cd65f3c8a795648e8 | ee7e60aa7a6eb5c9944921493063a76fdc1d3db2 | refs/heads/master | 2022-03-17T00:01:47.851518 | 2019-11-07T21:26:10 | 2019-11-07T21:26:10 | 82,010,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | import os, time, datetime
import logging
import inspect
def get_logger(logger_name, log_folder=None, timestamp="%Y%m%d", level=logging.DEBUG):
"""
Configure a logger object from logger_name and return (logger, logfile)
"""
from config import LOG_FILE_ROOT
# if doesn't specify a log folder, use the default one in config
if not log_folder:
log_folder = LOG_FILE_ROOT
if not os.path.exists(log_folder):
os.makedirs(log_folder)
if timestamp:
logfile = os.path.join(log_folder, '%s_%s.log' % (logger_name, time.strftime(timestamp, datetime.datetime.now().timetuple())))
else:
logfile = os.path.join(log_folder, '%s.log' % logger_name)
fmt = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s() ] - %(name)s - %(levelname)s -- %(message)s', datefmt="%H:%M:%S")
logger = logging.getLogger(logger_name)
logger.setLevel(level)
fh = logging.FileHandler(logfile)
fh.setFormatter(fmt)
fh.name = "logfile"
logger.addHandler(fh)
return (logger, logfile)
| [
"kevinxin@scripps.edu"
] | kevinxin@scripps.edu |
d8854d358f9e5cdd59f0e50511c3f669895a8b8b | 3a74764c3fc38f87cd2ed0ba9e96b23ad9a0677e | /bite_144/bite_144.py | df94d9a8f587deafa609f37f3a7497f89f2aafce | [] | no_license | nalwayv/bitesofpy | 7dbc7cb55c9bc3c111f67243759cf56a2b785f51 | 56b0f7f85fd4b18d11a1b5df8da0a95e5ba2dcaa | refs/heads/master | 2023-05-26T05:46:06.859108 | 2020-02-05T00:02:35 | 2020-02-05T00:02:35 | 216,651,358 | 2 | 0 | null | 2023-05-22T22:43:16 | 2019-10-21T19:41:25 | HTML | UTF-8 | Python | false | false | 1,944 | py | """
Bite 144. Calculate the Number of Months Passed
"""
from datetime import date
from dateutil.relativedelta import relativedelta
START_DATE = date(2018, 11, 1)
MIN_DAYS_TO_COUNT_AS_MONTH = 10
MONTHS_PER_YEAR = 12
def calc_months_passed(year, month, day):
"""Construct a date object from the passed in arguments.
If this fails due to bad inputs reraise the exception.
Also if the new date is < START_DATE raise a ValueError.
Then calculate how many months have passed since the
START_DATE constant. We suggest using dateutil.relativedelta!
One rule: if a new month is >= 10 (MIN_DAYS_TO_COUNT_AS_MONTH)
days in, it counts as an extra month.
For example:
date(2018, 11, 10) = 9 days in => 0 months
date(2018, 11, 11) = 10 days in => 1 month
date(2018, 12, 11) = 1 month + 10 days in => 2 months
date(2019, 12, 11) = 1 year + 1 month + 10 days in => 14 months
etc.
See the tests for more examples.
Return the number of months passed int.
"""
_date = date(year, month, day)
if _date < START_DATE:
raise ValueError("date too low")
if START_DATE == _date:
return 0
rd = relativedelta(_date, START_DATE)
_d = 1 if rd.days % MIN_DAYS_TO_COUNT_AS_MONTH == 0 else 0
return _d + rd.months + rd.years * MONTHS_PER_YEAR
if __name__ == "__main__":
print(calc_months_passed(2018, 11, 1) == 0)
print(calc_months_passed(2018, 11, 10) == 0)
print(calc_months_passed(2018, 11, 11) == 1)
print(calc_months_passed(2018, 12, 10) == 1)
print(calc_months_passed(2018, 12, 11) == 2)
print(calc_months_passed(2019, 12, 10) == 13)
print(calc_months_passed(2019, 12, 11) == 14)
try:
calc_months_passed('a',10,1)
except TypeError:
print('type error')
try:
calc_months_passed(2018, 10, 1)
except ValueError:
print('value error')
| [
"nalwayv@googlemail.com"
] | nalwayv@googlemail.com |
bed041cf24e432a76115526e3956a15fd8d80839 | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /For Irene/DFS/0938_Range_Sum_of_BST.py | 8028386c36fd192c2ea01a1603763c2e0bd3c142 | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 618 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def rangeSumBST(self, root: Optional[TreeNode], low: int, high: int) -> int:
ans = 0
def dfs(node):
nonlocal ans
if not node:
return
if low <= node.val <= high:
ans += node.val
dfs(node.left)
dfs(node.right)
dfs(root)
return ans
| [
"f14051172@gs.ncku.edu.tw"
] | f14051172@gs.ncku.edu.tw |
be17f52a78be4a863aa7b18e00af390d2d11966e | c43c88015f9498aed5f3b5a339d245c31781444e | /Free/l10n_by_doc/report/report_waybill.py | 347ec855716e4142af55494c78208705ba538981 | [] | no_license | mulaudzicalvin/perpul | 65106d41d5197fea17628ac1a7fa7e581d29d75e | 00e3a5ee1771d2e09a48460ca23c2e9c2ef507d6 | refs/heads/master | 2020-03-09T18:39:33.131420 | 2018-02-05T05:17:36 | 2018-02-05T05:17:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Perpul
# Copyright (C) 2016 CodUP (<http://codup.com>).
#
##############################################################################
from flectra import api, models
from report_helper import QWebHelper
class ByWayBillReport(models.AbstractModel):
_name = 'report.l10n_by_doc.report_waybill'
@api.model
def render_html(self, docids, data=None):
Report = self.env['report']
report = Report._get_report_from_name('l10n_by_doc.report_waybill')
selected_modules = self.env[report.model].browse(docids)
docargs = {
'helper': QWebHelper(),
'doc_ids': docids,
'doc_model': report.model,
'docs': selected_modules,
}
return Report.render('l10n_by_doc.report_waybill', docargs)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"daniel.podvesker@perpul.co"
] | daniel.podvesker@perpul.co |
185d598106dfbf637a7c47faa42c6c4fe6450c31 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /emesene/rev1286-1505/base-trunk-1286/desktop.py | aecfa820abe7f3244e3c947efdbbd7b4a8c296bb | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,661 | py | """
Simple desktop integration for Python. This module provides desktop environment
detection and resource opening support for a selection of common and
standardised desktop environments.
Copyright (C) 2005, 2006 Paul Boddie <paul@boddie.org.uk>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
--------
Desktop Detection
-----------------
To detect a specific desktop environment, use the get_desktop function.
To detect whether the desktop environment is standardised (according to the
proposed DESKTOP_LAUNCH standard), use the is_standard function.
Opening URLs
------------
To open a URL in the current desktop environment, relying on the automatic
detection of that environment, use the desktop.open function as follows:
desktop.open("http://www.python.org")
To override the detected desktop, specify the desktop parameter to the open
function as follows:
desktop.open("http://www.python.org", "KDE") # Insists on KDE
desktop.open("http://www.python.org", "GNOME") # Insists on GNOME
Without overriding using the desktop parameter, the open function will attempt
to use the "standard" desktop opening mechanism which is controlled by the
DESKTOP_LAUNCH environment variable as described below.
The DESKTOP_LAUNCH Environment Variable
---------------------------------------
The DESKTOP_LAUNCH environment variable must be shell-quoted where appropriate,
as shown in some of the following examples:
DESKTOP_LAUNCH="kdialog --msgbox" Should present any opened URLs in
their entirety in a KDE message box.
(Command "kdialog" plus parameter.)
DESKTOP_LAUNCH="my\ opener" Should run the "my opener" program to
open URLs.
(Command "my opener", no parameters.)
DESKTOP_LAUNCH="my\ opener --url" Should run the "my opener" program to
open URLs.
(Command "my opener" plus parameter.)
Details of the DESKTOP_LAUNCH environment variable convention can be found here:
http://lists.freedesktop.org/archives/xdg/2004-August/004489.html
"""
__version__ = "0.2.3"
import os
import sys
try:
import subprocess
def _run(cmd, shell, wait):
opener = subprocess.Popen(cmd, shell=shell)
if wait: opener.wait()
return opener.pid
except ImportError:
import popen2
def _run(cmd, shell, wait):
opener = popen2.Popen3(cmd)
if wait: opener.wait()
return opener.pid
import commands
import webbrowser # fallback
override = ''
def get_desktop(dontoverride=False):
"""
Detect the current desktop environment, returning the name of the
environment. If no environment could be detected, None is returned.
"""
global override
if override and not dontoverride:
return 'override'
elif os.environ.has_key("KDE_FULL_SESSION") or \
os.environ.has_key("KDE_MULTIHEAD"):
return "KDE"
elif os.environ.has_key("DESKTOP_SESSION") and \
os.environ['DESKTOP_SESSION'] == 'xfce4':
return 'xfce4'
elif os.environ.has_key("GNOME_DESKTOP_SESSION_ID") or \
os.environ.has_key("GNOME_KEYRING_SOCKET"):
return "GNOME"
elif sys.platform == "darwin":
return "Mac OS X"
elif hasattr(os, "startfile"):
return "Windows"
else:
return None
def is_standard():
"""
Return whether the current desktop supports standardised application
launching.
"""
return os.environ.has_key("DESKTOP_LAUNCH")
def open(url, desktop=None, wait=0):
"""
Open the 'url' in the current desktop's preferred file browser. If the
optional 'desktop' parameter is specified then attempt to use that
particular desktop environment's mechanisms to open the 'url' instead of
guessing or detecting which environment is being used.
Suggested values for 'desktop' are "standard", "KDE", "GNOME", "Mac OS X",
"Windows" where "standard" employs a DESKTOP_LAUNCH environment variable to
open the specified 'url'. DESKTOP_LAUNCH should be a command, possibly
followed by arguments, and must have any special characters shell-escaped.
The process identifier of the "opener" (ie. viewer, editor, browser or
program) associated with the 'url' is returned by this function. If the
process identifier cannot be determined, None is returned.
An optional 'wait' parameter is also available for advanced usage and, if
'wait' is set to a true value, this function will wait for the launching
mechanism to complete before returning (as opposed to immediately returning
as is the default behaviour).
"""
detected = get_desktop()
if (desktop is None or desktop == "override") and detected == "override":
global override
arg = override.replace("%url%", commands.mkarg(url))
return _run(arg, 1, wait)
elif (desktop is None or desktop == "standard") and is_standard():
arg = "".join([os.environ["DESKTOP_LAUNCH"], commands.mkarg(url)])
return _run(arg, 1, wait)
elif (desktop is None or desktop == "Windows") and detected == "Windows":
try:
return os.startfile(url)
except OSError:
return
elif desktop is None:
desktop = detected
cmd = get_command(desktop, url)
if cmd:
return _run(cmd, 0, wait)
else:
webbrowser.open(url)
def get_command(desktop, url):
'''Test for desktops where the overriding is not verified.'''
if desktop == "KDE":
return ["kfmclient", "exec", url]
elif desktop == "GNOME":
return ["gnome-open", url]
elif desktop == 'xfce4':
return ["exo-open", url]
elif desktop == "Mac OS X":
return ["open", url]
elif desktop == "standard":
return ['$DESKTOP_LAUNCH']
elif desktop == "Windows":
return ['os.startfile()']
else:
return None
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
ac96d791fd218866f388d481fafeb181f964bfea | ac5c28044dcad1331aaf11ba2112e5d18ed53472 | /experiment/models.py | 873e6490b08737f900c4e4b79e6bbfdd642170fb | [
"MIT"
] | permissive | seakers/daphne_brain | 2c4cd278591db7e21d639dbaa87235383d1ac5f5 | 4e3220d41552d224fee375d85f7cbc8106de7fc8 | refs/heads/main | 2023-08-21T14:21:00.795145 | 2022-10-20T20:26:05 | 2022-10-20T20:26:05 | 85,344,849 | 1 | 1 | MIT | 2023-02-16T06:37:34 | 2017-03-17T18:51:12 | PLpgSQL | UTF-8 | Python | false | false | 2,230 | py | from django.db import models
# Experiment Context (to perform experiments with human subjects and Daphne)
from daphne_context.models import UserInformation
class ExperimentContext(models.Model):
user_information = models.OneToOneField(UserInformation, on_delete=models.CASCADE)
is_running = models.BooleanField()
experiment_id = models.IntegerField()
current_state = models.TextField()
# A data structure defining an experimental stage
class ExperimentStage(models.Model):
experimentcontext = models.ForeignKey(ExperimentContext, on_delete=models.CASCADE)
type = models.CharField(max_length=50)
start_date = models.DateTimeField()
end_date = models.DateTimeField()
end_state = models.TextField()
class ExperimentAction(models.Model):
experimentstage = models.ForeignKey(ExperimentStage, on_delete=models.CASCADE)
action = models.TextField()
date = models.DateTimeField()
# An allowed command for Daphne (to be used with experiments to limit functionalities programmatically)
class AllowedCommand(models.Model):
user_information = models.ForeignKey(UserInformation, on_delete=models.CASCADE)
# Command Type Choice
COMMAND_TYPES = (
('engineer', 'Engineer Commands'),
('analyst', 'iFEED Commands'),
('explorer', 'Explorer Commands'),
('historian', 'Historian Commands'),
('critic', 'Critic Commands'),
('engineer_instruments', 'Instruments Cheatsheet'),
('engineer_instrument_parameters', 'Instrument Parameters Cheatsheet'),
('engineer_measurements', 'Measurements Cheatsheet'),
('engineer_stakeholders', 'Stakeholders Cheatsheet'),
('engineer_objectives', 'Objectives Cheatsheet'),
('engineer_subobjectives', 'Subobjectives Cheatsheet'),
('historian_measurements', 'Historical Measurements Cheatsheet'),
('historian_missions', 'Historical Missions Cheatsheet'),
('historian_technologies', 'Historical Technologies Cheatsheet'),
('historian_space_agencies', 'Space Agencies Cheatsheet'),
)
command_type = models.CharField(max_length=40, choices=COMMAND_TYPES)
# Command number
command_descriptor = models.IntegerField()
| [
"ani300@gmail.com"
] | ani300@gmail.com |
bf7013d24eab75836cdf292866fe2c6c680b154d | 53f5f694a83800b4465bd0417820117832f0f97d | /ifcbdb/dashboard/migrations/0019_auto_20190528_0647.py | 884b52bac1da41ff17061cc72bb96b746517d698 | [
"MIT"
] | permissive | WHOIGit/ifcbdb | 083da3dd223e6791b35ff952dc2e6b2970b8db50 | 36cac457b31b614b7eda2c15e28cbd2fbba7d388 | refs/heads/master | 2023-08-31T03:55:05.539434 | 2023-06-22T11:48:05 | 2023-06-22T11:48:05 | 178,424,730 | 5 | 6 | MIT | 2023-08-30T17:35:42 | 2019-03-29T14:53:50 | Python | UTF-8 | Python | false | false | 1,183 | py | # Generated by Django 2.1.7 on 2019-05-28 06:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0018_auto_20190508_1659'),
]
operations = [
migrations.AlterField(
model_name='bin',
name='concentration',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='humidity',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='look_time',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='ml_analyzed',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='run_time',
field=models.FloatField(default=-9999999),
),
migrations.AlterField(
model_name='bin',
name='temperature',
field=models.FloatField(default=-9999999),
),
]
| [
"joefutrelle@gmail.com"
] | joefutrelle@gmail.com |
5f41b1f3ee4e8792dd6455938e861c3e1a67e80c | ef9ab6d3ebb22fea68901c0e681abc25e5379fa6 | /FanFilmE2/fanfilm/resources/lib/sources/pl/serialeco.py | dc4170f3b9a78c322736bf81789d6b1624837c71 | [] | no_license | OpenPE/eePlugins | b2098a082ee5a5d929a29683e2334dc3895cb4b5 | 8f4a2963d5489e760eb778a10f00c3b49356d517 | refs/heads/master | 2020-07-30T11:27:28.198034 | 2019-09-16T15:13:55 | 2019-09-16T15:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,135 | py | # -*- coding: utf-8 -*-
'''
Covenant Add-on
Copyright (C) 2018 :)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import requests
try:
import urlparse
except:
import urllib.parse as urlparse
try:
import HTMLParser
from HTMLParser import HTMLParser
except:
from html.parser import HTMLParser
try:
import urllib2
except:
import urllib.request as urllib2
from resources.lib.libraries import source_utils
from resources.lib.libraries import client
from ptw.debug import log_exception
class source:
def __init__(self):
self.priority = 1
self.language = ['pl']
self.domains = ['seriale.co']
self.base_link = 'http://seriale.co'
self.session = requests.Session()
def contains_word(self, str_to_check, word):
if str(word).lower() in str(str_to_check).lower():
return True
return False
def contains_all_words(self, str_to_check, words):
for word in words:
if not self.contains_word(str_to_check, word):
return False
return True
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
titles = (tvshowtitle, localtvshowtitle)
return titles, year
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
return self.search_ep(url[0], season, episode, url[1]) # url = titles & year
def search_ep(self, titles, season, episode, year):
try:
for title in titles:
data = {
'fid_name': title,
'sezon': season,
'odcinek': episode,
'title': title
}
result = requests.post('http://178.19.110.218/forumserialeco/skrypt/szukaj3.php', data=data).content
result = result.decode('utf-8')
h = HTMLParser()
result = h.unescape(result)
if result:
return title, season, episode
except Exception as e:
log_exception()
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
data = {
'fid_name': url[0],
'sezon': url[1],
'odcinek': url[2],
'title': url[0]
}
result = requests.post('http://178.19.110.218/forumserialeco/skrypt/szukaj3.php', data=data).content
result = result.decode('utf-8')
h = HTMLParser()
result = h.unescape(result)
if result:
wersja = re.findall("""wersja: <b>(.*?)<\/b>""", result)
id = re.findall("""url='(.*?)'""", result)
for item in zip(wersja, id):
try:
if item[1]:
info = self.get_lang_by_type(item[0])
content = client.request("http://seriale.co/frame.php?src=" + item[1])
video_link = str(client.parseDOM(content, 'iframe', ret='src')[0])
valid, host = source_utils.is_host_valid(video_link, hostDict)
if valid:
sources.append(
{'source': host, 'quality': 'SD', 'language': info[0], 'url': video_link,
'info': info[1], 'direct': False,
'debridonly': False})
else:
continue
except:
continue
return sources
except:
log_exception()
return sources
def get_lang_by_type(self, lang_type):
if "dubbing" in lang_type.lower():
if "kino" in lang_type.lower():
return 'pl', 'Dubbing Kino'
return 'pl', 'Dubbing'
elif 'lektor pl' in lang_type.lower():
return 'pl', 'Lektor'
elif 'lektor' in lang_type.lower():
return 'pl', 'Lektor'
elif 'napisy pl' in lang_type.lower():
return 'pl', 'Napisy'
elif 'napisy' in lang_type.lower():
return 'pl', 'Napisy'
elif 'POLSKI' in lang_type.lower():
return 'pl', None
elif 'pl' in lang_type.lower():
return 'pl', None
return 'en', None
def resolve(self, url):
return str(url)
| [
"zdzislaw22@windowslive.com"
] | zdzislaw22@windowslive.com |
dac5f9ee311610fe481bf4209faef487ff95852d | 304033f60097c489cbc60aab639be45ccdbef1a5 | /algorithms/boj/dijkstra/1162.py | f27956edaec3a3b91c338c4611930bbae2ba73f1 | [] | no_license | pgw928/TIL | 3d0c47c07bd1f5c73826daf8579a2b0e3f93cb95 | 765906f1e6eecad4ad8ec9bf704041433d7eb304 | refs/heads/master | 2023-06-29T05:46:30.039815 | 2021-08-10T17:38:11 | 2021-08-10T17:38:11 | 288,923,095 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | import sys
import heapq
from math import inf
from itertools import combinations
input = sys.stdin.readline
N, M, K = map(int, input().split())
pre = [tuple(map(int, input().split())) for _ in range(M)]
combs = combinations(range(M),K)
def dijkstra(start):
hq = []
heapq.heappush(hq, (0, start))
distance[start] = 0
while hq:
dist, node = heapq.heappop(hq)
if distance[node] < dist:
continue
for n_node, n_dist in graph[node]:
tmp = n_dist + distance[node]
if tmp < distance[n_node]:
distance[n_node] = tmp
heapq.heappush(hq, (tmp, n_node))
m = inf
for A in combs:
distance = [inf] * (N + 1)
graph = [[] for _ in range(M + 1)]
for i in range(M):
a, b, c = pre[i]
if i in A:
graph[a].append((b,0))
graph[b].append((a,0))
else:
graph[a].append((b,c))
graph[b].append((a,c))
dijkstra(1)
m = min(m, distance[N])
print(m) | [
"pku928@naver.com"
] | pku928@naver.com |
467289ab1680efe59ad7630bb59afa447224159b | 49663ea34b41c8180d7484f778f5cad2e701d220 | /tests/platform_tests/api/test_module.py | 8310c0f31aff76975ad6a86a2a124cdbddc53e25 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | stepanblyschak/sonic-mgmt | ed08c98e7bff1615b057daa8711686aa5986073d | a1ae1e0b4e9927e6f52916f76121780d19ec3e54 | refs/heads/master | 2023-04-07T01:30:11.403900 | 2023-03-29T10:16:52 | 2023-03-29T10:16:52 | 135,678,178 | 0 | 0 | NOASSERTION | 2023-03-29T16:13:55 | 2018-06-01T06:41:49 | Python | UTF-8 | Python | false | false | 25,044 | py | import logging
import re
import pytest
import yaml
from time import sleep
from tests.common.helpers.platform_api import chassis, module
from tests.platform_tests.cli.util import get_skip_mod_list
from platform_api_test_base import PlatformApiTestBase
from tests.common.helpers.assertions import pytest_assert
###################################################
# TODO: Remove this after we transition to Python 3
import sys
if sys.version_info.major == 3:
STRING_TYPE = str
else:
STRING_TYPE = str
# END Remove this after we transition to Python 3
###################################################
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.disable_loganalyzer, # disable automatic loganalyzer
pytest.mark.topology('any')
]
REGEX_MAC_ADDRESS = r'^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$'
REGEX_SERIAL_NUMBER = r'^[A-Za-z0-9]+$'
REGEX_IP_ADDRESS = r'^(?:[0-9]{1,3}\.){3}([0-9]{1,3})$'
MODULE_TYPE = ['SUPERVISOR', 'LINE-CARD', 'FABRIC-CARD']
MIDPLANE_SUPP_MODULE = ['SUPERVISOR', 'LINE-CARD']
MODULE_STATUS = ['Empty', 'Offline', 'PoweredDown', 'Present', 'Fault', 'Online']
# TODO: EEPROM info is duplicated with chassis.py. Break out into a shared module
# Valid OCP ONIE TlvInfo EEPROM type codes as defined here:
# https://opencomputeproject.github.io/onie/design-spec/hw_requirements.html
ONIE_TLVINFO_TYPE_CODE_PRODUCT_NAME = '0x21' # Product Name
ONIE_TLVINFO_TYPE_CODE_PART_NUMBER = '0x22' # Part Number
ONIE_TLVINFO_TYPE_CODE_SERIAL_NUMBER = '0x23' # Serial Number
ONIE_TLVINFO_TYPE_CODE_BASE_MAC_ADDR = '0x24' # Base MAC Address
ONIE_TLVINFO_TYPE_CODE_MFR_DATE = '0x25' # Manufacture Date
ONIE_TLVINFO_TYPE_CODE_DEVICE_VERSION = '0x26' # Device Version
ONIE_TLVINFO_TYPE_CODE_LABEL_REVISION = '0x27' # Label Revision
ONIE_TLVINFO_TYPE_CODE_PLATFORM_NAME = '0x28' # Platform Name
ONIE_TLVINFO_TYPE_CODE_ONIE_VERSION = '0x29' # ONIE Version
ONIE_TLVINFO_TYPE_CODE_NUM_MACS = '0x2A' # Number of MAC Addresses
ONIE_TLVINFO_TYPE_CODE_MANUFACTURER = '0x2B' # Manufacturer
ONIE_TLVINFO_TYPE_CODE_COUNTRY_CODE = '0x2C' # Country Code
ONIE_TLVINFO_TYPE_CODE_VENDOR = '0x2D' # Vendor
ONIE_TLVINFO_TYPE_CODE_DIAG_VERSION = '0x2E' # Diag Version
ONIE_TLVINFO_TYPE_CODE_SERVICE_TAG = '0x2F' # Service Tag
ONIE_TLVINFO_TYPE_CODE_VENDOR_EXT = '0xFD' # Vendor Extension
ONIE_TLVINFO_TYPE_CODE_CRC32 = '0xFE' # CRC-32
class TestModuleApi(PlatformApiTestBase):
"""Platform API test cases for the Module class"""
num_modules = None
# This fixture would probably be better scoped at the class level, but
# it relies on the platform_api_conn_per_supervisor fixture, which is scoped at the function
# level, so we must do the same here to prevent a scope mismatch.
@pytest.fixture(scope="function", autouse=True)
def setup(self, platform_api_conn):
if self.num_modules is None:
try:
self.num_modules = int(chassis.get_num_modules(platform_api_conn))
except:
pytest.fail("num_modules is not an integer")
else:
if self.num_modules == 0:
pytest.skip("No modules found on device")
#
# Functions to test methods inherited from DeviceBase class
#
@pytest.fixture(scope="function", autouse=True)
def get_skip_mod_list(self, duthosts, enum_rand_one_per_hwsku_hostname):
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
self.skip_mod_list = get_skip_mod_list(duthost)
def skip_absent_module(self, module_num, platform_api_conn):
name = module.get_name(platform_api_conn, module_num)
if name in self.skip_mod_list:
logger.info("Skipping module {} since it is part of skip_mod_list".format(name))
return True
return False
def skip_module_other_than_myself(self, module_num, platform_api_conn):
if chassis.is_modular_chassis(platform_api_conn):
name = module.get_name(platform_api_conn, module_num)
module_slot = module.get_slot(platform_api_conn, module_num)
chassis_slot = chassis.get_my_slot(platform_api_conn)
if module_slot != chassis_slot:
logger.info("Skipping module {} since it is not the same module as myself".format(name))
return True
return False
return False
def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
for i in range(self.num_modules):
if self.skip_absent_module(i,platform_api_conn):
continue
name = module.get_name(platform_api_conn, i)
if self.expect(name is not None, "Unable to retrieve module {} name".format(i)):
self.expect(isinstance(name, STRING_TYPE), "Module {} name appears incorrect".format(i))
self.assert_expectations()
def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
for i in range(self.num_modules):
presence = module.get_presence(platform_api_conn, i)
if self.expect(presence is not None, "Unable to retrieve module {} presence".format(i)):
if self.expect(isinstance(presence, bool), "Module {} presence appears incorrect".format(i)):
name = module.get_name(platform_api_conn, i)
if name not in self.skip_mod_list:
self.expect(presence is True, "Module {} is not present".format(i))
else:
logger.info("Skipping module {} since it is part of skip_mod_list".format(name))
self.assert_expectations()
def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
for i in range(self.num_modules):
if self.skip_absent_module(i,platform_api_conn):
continue
if self.skip_module_other_than_myself(i,platform_api_conn):
continue
model = module.get_model(platform_api_conn, i)
if self.expect(model is not None, "Unable to retrieve module {} model".format(i)):
self.expect(isinstance(model, STRING_TYPE), "Module {} model appears incorrect".format(i))
self.assert_expectations()
def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
for i in range(self.num_modules):
if self.skip_absent_module(i,platform_api_conn):
continue
if self.skip_module_other_than_myself(i,platform_api_conn):
continue
serial = module.get_serial(platform_api_conn, i)
if self.expect(serial is not None, "Module {}: Failed to retrieve serial number".format(i)):
self.expect(isinstance(serial, STRING_TYPE), "Module {} serial number appears incorrect".format(i))
self.assert_expectations()
def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
for i in range(self.num_modules):
if self.skip_absent_module(i,platform_api_conn):
continue
status = module.get_status(platform_api_conn, i)
if self.expect(status is not None, "Unable to retrieve module {} status".format(i)):
self.expect(isinstance(status, bool), "Module {} status appears incorrect".format(i))
self.assert_expectations()
def test_get_position_in_parent(self, platform_api_conn):
for i in range(self.num_modules):
if self.skip_absent_module(i,platform_api_conn):
continue
position = module.get_position_in_parent(platform_api_conn, i)
if self.expect(position is not None, "Failed to perform get_position_in_parent for module {}".format(i)):
self.expect(isinstance(position, int), "Position value must be an integer value for module {}".format(i))
self.assert_expectations()
def test_is_replaceable(self, platform_api_conn):
for i in range(self.num_modules):
if self.skip_absent_module(i,platform_api_conn):
continue
replaceable = module.is_replaceable(platform_api_conn, i)
if self.expect(replaceable is not None, "Failed to perform is_replaceable for module {}".format(i)):
self.expect(isinstance(replaceable, bool), "Replaceable value must be a bool value for module {}".format(i))
self.assert_expectations()
#
# Functions to test methods defined in ModuleBase class
#
def test_get_base_mac(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
# Ensure the base MAC address of each module is sane
# TODO: Add expected base MAC address for each module to inventory file and compare against it
for i in range(self.num_modules):
if self.skip_absent_module(i,platform_api_conn):
continue
if self.skip_module_other_than_myself(i,platform_api_conn):
continue
# Need to skip FABRIC-CARDx as they do not have base_mac assigned
mod_name = module.get_name(platform_api_conn, i)
if "FABRIC-CARD" in mod_name:
logger.info("skipping get_base_mac for module {} which is a Fabric Card".format(mod_name))
continue
base_mac = module.get_base_mac(platform_api_conn, i)
if not self.expect(base_mac is not None, "Module {}: Failed to retrieve base MAC address".format(i)):
continue
self.expect(re.match(REGEX_MAC_ADDRESS, base_mac), "Module {}: Base MAC address appears to be incorrect".format(i))
self.assert_expectations()
def test_get_system_eeprom_info(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """
    Test that we can retrieve sane system EEPROM info from each module of the DUT via the platform API
    """
    # OCP ONIE TlvInfo EEPROM type codes defined here: https://opencomputeproject.github.io/onie/design-spec/hw_requirements.html
    VALID_ONIE_TLVINFO_TYPE_CODES_LIST = list(map(str.lower,[
        ONIE_TLVINFO_TYPE_CODE_PRODUCT_NAME,
        ONIE_TLVINFO_TYPE_CODE_PART_NUMBER,
        ONIE_TLVINFO_TYPE_CODE_SERIAL_NUMBER,
        ONIE_TLVINFO_TYPE_CODE_BASE_MAC_ADDR,
        ONIE_TLVINFO_TYPE_CODE_MFR_DATE,
        ONIE_TLVINFO_TYPE_CODE_DEVICE_VERSION,
        ONIE_TLVINFO_TYPE_CODE_LABEL_REVISION,
        ONIE_TLVINFO_TYPE_CODE_PLATFORM_NAME,
        ONIE_TLVINFO_TYPE_CODE_ONIE_VERSION,
        ONIE_TLVINFO_TYPE_CODE_NUM_MACS,
        ONIE_TLVINFO_TYPE_CODE_MANUFACTURER,
        ONIE_TLVINFO_TYPE_CODE_COUNTRY_CODE,
        ONIE_TLVINFO_TYPE_CODE_VENDOR,
        ONIE_TLVINFO_TYPE_CODE_DIAG_VERSION,
        ONIE_TLVINFO_TYPE_CODE_SERVICE_TAG,
        ONIE_TLVINFO_TYPE_CODE_VENDOR_EXT,
        ONIE_TLVINFO_TYPE_CODE_CRC32
        ]))
    # Only these three type codes are mandatory for a module's EEPROM to be considered sane.
    MINIMUM_REQUIRED_TYPE_CODES_LIST = list(map(str.lower,[
        ONIE_TLVINFO_TYPE_CODE_SERIAL_NUMBER,
        ONIE_TLVINFO_TYPE_CODE_BASE_MAC_ADDR,
        ONIE_TLVINFO_TYPE_CODE_CRC32
        ]))
    # TODO: Add expected system EEPROM info for each module to inventory file and compare against it
    for i in range(self.num_modules):
        if self.skip_absent_module(i,platform_api_conn):
            continue
        if self.skip_module_other_than_myself(i,platform_api_conn):
            continue
        # Need to skip FABRIC-CARDx as they do not have system_eeprom info
        mod_name = module.get_name(platform_api_conn, i)
        if "FABRIC-CARD" in mod_name:
            logger.info("skipping test_get_system_eeprom_info for module {} which is a Fabric Card".format(mod_name))
            continue
        syseeprom_info_dict = module.get_system_eeprom_info(platform_api_conn, i)
        if not self.expect(syseeprom_info_dict is not None, "Module {}: Failed to retrieve system EEPROM data".format(i)):
            continue
        if not self.expect(isinstance(syseeprom_info_dict, dict), "Module {}: System EEPROM data is not in the expected format".format(i)):
            continue
        # Some vendors returns unicode string with lower/mixed case which fails the TLV validation tests.
        # so we always convert eveything to lower case string to perform the comparison to make this check more robust.
        syseeprom_type_codes_list = list(map(str.lower,[str(x) for x in list(syseeprom_info_dict.keys())]))
        # Ensure that all keys in the resulting dictionary are valid ONIE TlvInfo type codes
        self.expect(set(syseeprom_type_codes_list) <= set(VALID_ONIE_TLVINFO_TYPE_CODES_LIST), "Module {}: Invalid TlvInfo type code found".format(i))
        # Ensure that we were able to obtain the minimum required type codes
        self.expect(set(MINIMUM_REQUIRED_TYPE_CODES_LIST) <= set(syseeprom_type_codes_list), "Module {}: Minimum required TlvInfo type codes not provided".format(i))
        # NOTE(review): the membership checks above lowercase the keys, but the two direct
        # lookups below use the constants in their original case. If a vendor really returns
        # lower/mixed-case keys (as the comment above suggests), these lookups would raise
        # KeyError — confirm and normalize the lookup keys if needed.
        # Ensure the base MAC address is sane
        base_mac = syseeprom_info_dict[ONIE_TLVINFO_TYPE_CODE_BASE_MAC_ADDR]
        self.expect(base_mac is not None, "Module {}: Failed to retrieve base MAC address".format(i))
        self.expect(re.match(REGEX_MAC_ADDRESS, base_mac), "Module {}: Base MAC address appears to be incorrect".format(i))
        # Ensure the serial number is sane
        serial = syseeprom_info_dict[ONIE_TLVINFO_TYPE_CODE_SERIAL_NUMBER]
        self.expect(serial is not None, "Module {}: Failed to retrieve serial number".format(i))
        self.expect(re.match(REGEX_SERIAL_NUMBER, serial), "Module {}: Serial number appears to be incorrect".format(i))
    self.assert_expectations()
def test_components(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Verify each module reports a consistent component count and component list."""
    # TODO: Ensure the number of components and that the returned list is correct for this platform
    for mod_idx in range(self.num_modules):
        try:
            num_components = int(module.get_num_components(platform_api_conn, mod_idx))
        except Exception:
            # BUGFIX: was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit; only real API/conversion errors
            # should fail the test here.
            pytest.fail("Module {}: num_components is not an integer".format(mod_idx))
        component_list = module.get_all_components(platform_api_conn, mod_idx)
        if not self.expect(component_list is not None, "Module {}: Failed to retrieve components".format(mod_idx)):
            continue
        self.expect(isinstance(component_list, list) and len(component_list) == num_components,
                    "Module {}: Components appear to be incorrect".format(mod_idx))
        # Per-index accessor must agree with the bulk list.
        for comp_idx in range(num_components):
            component = module.get_component(platform_api_conn, mod_idx, comp_idx)
            self.expect(component and component == component_list[comp_idx],
                        "Module {}: Component {} is incorrect".format(mod_idx, comp_idx))
    self.assert_expectations()
def test_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Verify each module reports a consistent fan count and fan list."""
    # TODO: Ensure the number of fans and that the returned list is correct for this platform
    for mod_idx in range(self.num_modules):
        try:
            num_fans = int(module.get_num_fans(platform_api_conn, mod_idx))
        except Exception:
            # BUGFIX: was a bare 'except:' (would also swallow KeyboardInterrupt/SystemExit).
            pytest.fail("Module {}: num_fans is not an integer".format(mod_idx))
        fan_list = module.get_all_fans(platform_api_conn, mod_idx)
        if not self.expect(fan_list is not None, "Module {}: Failed to retrieve fans".format(mod_idx)):
            continue
        self.expect(isinstance(fan_list, list) and len(fan_list) == num_fans,
                    "Module {}: Fans appear to be incorrect".format(mod_idx))
        # Per-index accessor must agree with the bulk list.
        for fan_idx in range(num_fans):
            fan = module.get_fan(platform_api_conn, mod_idx, fan_idx)
            self.expect(fan and fan == fan_list[fan_idx],
                        "Module {}: Fan {} is incorrect".format(mod_idx, fan_idx))
    self.assert_expectations()
def test_psus(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Verify each module reports a consistent PSU count and PSU list."""
    # TODO: Ensure the number of PSUs and that the returned list is correct for this platform
    for mod_idx in range(self.num_modules):
        try:
            num_psus = int(module.get_num_psus(platform_api_conn, mod_idx))
        except Exception:
            # BUGFIX: was a bare 'except:' (would also swallow KeyboardInterrupt/SystemExit).
            pytest.fail("Module {}: num_psus is not an integer".format(mod_idx))
        psu_list = module.get_all_psus(platform_api_conn, mod_idx)
        if not self.expect(psu_list is not None, "Module {}: Failed to retrieve PSUs".format(mod_idx)):
            continue
        self.expect(isinstance(psu_list, list) and len(psu_list) == num_psus,
                    "Module {}: PSUs appear to be incorrect".format(mod_idx))
        # Per-index accessor must agree with the bulk list.
        for psu_idx in range(num_psus):
            psu = module.get_psu(platform_api_conn, mod_idx, psu_idx)
            self.expect(psu and psu == psu_list[psu_idx],
                        "Module {}: PSU {} is incorrect".format(mod_idx, psu_idx))
    self.assert_expectations()
def test_thermals(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Verify each module reports a consistent thermal count and thermal list."""
    # TODO: Ensure the number of thermals and that the returned list is correct for this platform
    for mod_idx in range(self.num_modules):
        try:
            num_thermals = int(module.get_num_thermals(platform_api_conn, mod_idx))
        except Exception:
            # BUGFIX: was a bare 'except:' (would also swallow KeyboardInterrupt/SystemExit).
            pytest.fail("Module {}: num_thermals is not an integer".format(mod_idx))
        thermal_list = module.get_all_thermals(platform_api_conn, mod_idx)
        if not self.expect(thermal_list is not None, "Module {}: Failed to retrieve thermals".format(mod_idx)):
            continue
        self.expect(isinstance(thermal_list, list) and len(thermal_list) == num_thermals,
                    "Module {}: Thermals appear to be incorrect".format(mod_idx))
        for therm_idx in range(num_thermals):
            thermal = module.get_thermal(platform_api_conn, mod_idx, therm_idx)
            # BUGFIX: the failure message had a single '{}' placeholder but two
            # format arguments; made it consistent with the sibling tests.
            self.expect(thermal and thermal == thermal_list[therm_idx],
                        "Module {}: Thermal {} is incorrect".format(mod_idx, therm_idx))
    self.assert_expectations()
def test_sfps(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Verify each module reports a consistent SFP count and SFP list."""
    # TODO: Ensure the number of SFPs and that the returned list is correct for this platform
    for mod_idx in range(self.num_modules):
        try:
            num_sfps = int(module.get_num_sfps(platform_api_conn, mod_idx))
        except Exception:
            # BUGFIX: was a bare 'except:' (would also swallow KeyboardInterrupt/SystemExit).
            pytest.fail("Module {}: num_sfps is not an integer".format(mod_idx))
        sfp_list = module.get_all_sfps(platform_api_conn, mod_idx)
        if not self.expect(sfp_list is not None, "Module {}: Failed to retrieve SFPs".format(mod_idx)):
            continue
        self.expect(isinstance(sfp_list, list) and len(sfp_list) == num_sfps,
                    "Module {}: SFPs appear to be incorrect".format(mod_idx))
        # Per-index accessor must agree with the bulk list.
        for sfp_idx in range(num_sfps):
            sfp = module.get_sfp(platform_api_conn, mod_idx, sfp_idx)
            self.expect(sfp and sfp == sfp_list[sfp_idx],
                        "Module {}: SFP {} is incorrect".format(mod_idx, sfp_idx))
    self.assert_expectations()
def test_get_description(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Each present module must report a string description."""
    for mod_idx in range(self.num_modules):
        if self.skip_absent_module(mod_idx, platform_api_conn):
            continue
        description = module.get_description(platform_api_conn, mod_idx)
        got_description = self.expect(description is not None,
                                      "Unable to retrieve module {} description".format(mod_idx))
        if got_description:
            self.expect(isinstance(description, STRING_TYPE),
                        "Module {} description appears incorrect".format(mod_idx))
    self.assert_expectations()
def test_get_slot(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Each present module must report its slot id as an int or a string."""
    for mod_idx in range(self.num_modules):
        if self.skip_absent_module(mod_idx, platform_api_conn):
            continue
        slot_id = module.get_slot(platform_api_conn, mod_idx)
        if self.expect(slot_id is not None, "Unable to retrieve module {} slot id".format(mod_idx)):
            # isinstance accepts (possibly nested) tuples of types.
            self.expect(isinstance(slot_id, (int, STRING_TYPE)),
                        "Module {} slot id is not correct ".format(mod_idx))
    self.assert_expectations()
def test_get_type(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Each present module must report a known module type string."""
    for i in range(self.num_modules):
        if self.skip_absent_module(i, platform_api_conn):
            continue
        mod_type = module.get_type(platform_api_conn, i)
        # BUGFIX: failure message previously said "slot id" (copy/paste from test_get_slot).
        if self.expect(mod_type is not None, "Unable to retrieve module {} type".format(i)):
            self.expect(isinstance(mod_type, STRING_TYPE), "Module {} type format appears not correct ".format(i))
            # BUGFIX: the '{}' placeholder was never filled in; format the module index.
            self.expect(mod_type in MODULE_TYPE, "Module {} type appears not to be correct".format(i))
    self.assert_expectations()
def test_get_maximum_consumed_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Each present module must report its maximum consumed power as a float."""
    for i in range(self.num_modules):
        if self.skip_absent_module(i, platform_api_conn):
            continue
        mod_max_con_power = module.get_maximum_consumed_power(platform_api_conn, i)
        # BUGFIX: failure message previously said "slot id" (copy/paste from test_get_slot).
        if self.expect(mod_max_con_power is not None,
                       "Unable to retrieve module {} maximum consumed power".format(i)):
            self.expect(isinstance(mod_max_con_power, float),
                        "Module {} max consumed power format appears incorrect ".format(i))
    self.assert_expectations()
def test_get_midplane_ip(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Modules that support a midplane must report a well-formed midplane IP address."""
    for mod_idx in range(self.num_modules):
        if self.skip_absent_module(mod_idx, platform_api_conn):
            continue
        # Only module types listed in MIDPLANE_SUPP_MODULE have a midplane.
        if module.get_type(platform_api_conn, mod_idx) not in MIDPLANE_SUPP_MODULE:
            continue
        midplane_ip = module.get_midplane_ip(platform_api_conn, mod_idx)
        if self.expect(midplane_ip is not None, "Unable to retrieve module {} midplane ip".format(mod_idx)):
            self.expect(re.match(REGEX_IP_ADDRESS, midplane_ip),
                        "Module {} midplane ip appears incorrect".format(mod_idx))
    self.assert_expectations()
def test_is_midplane_reachable(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Midplane reachability must be a boolean for modules that have a midplane."""
    for i in range(self.num_modules):
        if self.skip_absent_module(i, platform_api_conn):
            continue
        module_type = module.get_type(platform_api_conn, i)
        if module_type in MIDPLANE_SUPP_MODULE:
            midplane_status = module.is_midplane_reachable(platform_api_conn, i)
            if self.expect(midplane_status is not None,
                           "Unable to retrieve module {} midplane reachability".format(i)):
                # BUGFIX: fixed "midplabe" typo in the failure message.
                self.expect(isinstance(midplane_status, bool),
                            "Module {} midplane reachability appears incorrect".format(i))
    self.assert_expectations()
def test_get_oper_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Operational status must be one of the known MODULE_STATUS strings."""
    for mod_idx in range(self.num_modules):
        if self.skip_absent_module(mod_idx, platform_api_conn):
            continue
        status = module.get_oper_status(platform_api_conn, mod_idx)
        if not self.expect(status is not None, "Unable to retrieve module {} status".format(mod_idx)):
            continue
        self.expect(isinstance(status, STRING_TYPE), "Module {} status appears incorrect".format(mod_idx))
        self.expect(status in MODULE_STATUS, "Module {} status {} is invalid value".format(mod_idx, status))
    self.assert_expectations()
def test_reboot(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn):
    """Reboot every applicable module and verify it comes back online."""
    reboot_type = 'default'
    reboot_timeout = 300  # seconds to wait for the module to come back up

    for mod_idx in range(self.num_modules):
        mod_name = module.get_name(platform_api_conn, mod_idx)
        if self.skip_module_other_than_myself(mod_idx, platform_api_conn):
            continue
        if mod_name in self.skip_mod_list:
            logger.info("skipping reboot for module {} ".format(mod_name))
        else:
            module_reboot = module.reboot(platform_api_conn, mod_idx, reboot_type)
            pytest_assert(module_reboot == bool(True), "module {} reboot failed".format(mod_idx))
            sleep(reboot_timeout)
            mod_status = module.get_oper_status(platform_api_conn, mod_idx)
            # BUGFIX: the failure message previously read "boot up successful",
            # the opposite of what it should report when this assertion fails.
            pytest_assert(mod_status == "Online",
                          "module {} did not come back online after reboot".format(mod_idx))
| [
"noreply@github.com"
] | stepanblyschak.noreply@github.com |
919f4cea47fb24fd61b0b8ef0e6c3b13d4cbcbb2 | cced1f1ad18c6d9c3b96b2ae53cac8e86846f1f5 | /Blog/comment/templatetags/comment.py | 986e54415fdcb2915afba673ef52a43bd8aa4d30 | [] | no_license | sug5806/portfolio | a3904be506a3746e16da57bba5926c38743783ad | b943955a52c622094a58fb9124323298261ae80a | refs/heads/master | 2022-12-10T06:23:38.472893 | 2019-07-05T04:56:59 | 2019-07-05T04:56:59 | 190,156,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | from django.contrib.contenttypes.models import ContentType
from django.template import Library
from django.template.loader import render_to_string
register = Library()
from comment.forms import CommentForm
from comment.models import Comment
@register.simple_tag(takes_context=True)
def show_comment(context, content_type, object_id):
    """Render the comment form plus existing comments for the given object."""
    # Resolve the ContentType entry for the target model instance.
    target_type = ContentType.objects.get_for_model(content_type)
    # Comment form pre-filled with the generic-relation fields.
    form = CommentForm(initial={'content_type': target_type, 'object_id': object_id})
    # All comments attached to this object via the generic relation.
    comments = Comment.objects.filter(content_type=target_type, object_id=object_id).all()
    # Render the partial template with the request taken from the tag context.
    return render_to_string('comment/show_comment.html',
                            {'form': form, 'object_list': comments},
                            request=context['request'])
| [
"sug5806@gmail.com"
] | sug5806@gmail.com |
07c38900b1136a95de01872886ab8a1d18c0f9fd | 63598a5a625265f01d2ab68475db7277b4649da0 | /minet/cli/facebook/utils.py | fc6f8f019e75ea8322d1e2c9ec4f95ef1e2b2f16 | [] | no_license | metinefendie/minet | dcb83829ccd574c94301b65a9a1c071b85ceb7c2 | 072723d75ed7c60c370866c92da0f299fe12da21 | refs/heads/master | 2022-04-25T13:35:31.998650 | 2020-04-23T16:51:59 | 2020-04-23T16:51:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | # =============================================================================
# Minet Facebook CLI Action Utils
# =============================================================================
#
# Miscellaneous helpers used by `minet fb`.
#
from http.cookies import SimpleCookie
from minet.utils import grab_cookies
from minet.cli.utils import die
FACEBOOK_URL = 'https://www.facebook.com/'
def fix_cookie(cookie_string):
    """Normalize a raw Facebook cookie string for scraping use.

    Strips tracking items that could identify the client and forces the
    locale to en_US so scraped pages have a predictable language.
    """
    cookie = SimpleCookie()
    cookie.load(cookie_string)

    # NOTE: those cookie items can rat you out
    # BUGFIX: delete each key independently. The original wrapped both `del`
    # statements in one try/except KeyError, so when 'm_pixel_ratio' was
    # absent, 'wd' was never removed.
    for tracking_key in ('m_pixel_ratio', 'wd'):
        cookie.pop(tracking_key, None)

    cookie['locale'] = 'en_US'

    return '; '.join(key + '=' + morsel.coded_value for key, morsel in cookie.items())
def grab_facebook_cookie(namespace):
    """Resolve the Facebook authentication cookie from a browser or raw string."""
    if namespace.cookie in ('firefox', 'chrome'):
        # Extract the cookie straight from the chosen browser's store.
        get_cookie_for_url = grab_cookies(namespace.cookie)

        if get_cookie_for_url is None:
            die('Could not extract cookies from %s.' % namespace.cookie)

        cookie = get_cookie_for_url(FACEBOOK_URL)
    else:
        # Otherwise the user supplied the cookie value directly.
        cookie = namespace.cookie.strip()

    if not cookie:
        die([
            'Relevant cookie not found.',
            'A Facebook authentication cookie is necessary to be able to access Facebook pages.',
            'Use the --cookie flag to choose a browser from which to extract the cookie or give your cookie directly.'
        ])

    return fix_cookie(cookie)
"guillaumeplique@gmail.com"
] | guillaumeplique@gmail.com |
b653a43ed41620c6a44b82f778844b4d44bfd45b | 536aa3e7833ead462ccb087e827f2a490e1b5216 | /stripe/api_resources/payout.py | 8227c4573a00b7fce53dc1804bcdc34647dbb857 | [
"MIT"
] | permissive | condemane/stripe-python | 83b6026fec6742535b352a396622453fb6fc8f84 | 1ca277051cf212c6ac8a22b49321cfe9e1ffd405 | refs/heads/master | 2021-07-23T16:39:05.896112 | 2017-11-03T02:47:45 | 2017-11-03T02:47:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from __future__ import absolute_import, division, print_function
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import UpdateableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
class Payout(CreateableAPIResource, UpdateableAPIResource,
             ListableAPIResource):
    """Stripe Payout API resource (create / update / list, plus cancel)."""

    OBJECT_NAME = 'payout'

    def cancel(self):
        """Cancel this payout and refresh the local object from the response."""
        cancel_url = self.instance_url() + '/cancel'
        response = self.request('post', cancel_url)
        self.refresh_from(response)
| [
"ob@stripe.com"
] | ob@stripe.com |
060f65c40048132d59b778c41e50f1d54ffe6d33 | 0130c8b14927097663157846adc4b146d67d2fda | /tests/common/test_run/apply_rms_prop_run.py | 7db08112a030e82d216c1e0b4300a05ddbe3e473 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] | permissive | Shigangli/akg | e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc | 3766c54e0b109541932d147a6b5643a334b82403 | refs/heads/master | 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,815 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""apply_rms_prop_run"""
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import apply_rms_prop
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
def apply_rms_prop_run(shape, dtype, lr, momentum, rho, epsilon, attrs=None):
    """run function for dsl function apply_rms_prop."""
    if attrs is None:
        attrs = {}
    dtype = dtype.lower()

    # var, ms, mom and grad share `shape`; lr, momentum, rho are (1,) scalars.
    tensor_shapes = [shape, shape, shape, shape, (1,), (1,), (1,)]
    tensor_types = [dtype] * 7
    op_attrs = [epsilon]

    if 'tuning' in attrs:
        # Tuning mode: only build the kernel (and optionally the expects).
        tuning = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(apply_rms_prop.apply_rms_prop, tensor_shapes, tensor_types,
                                  op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=tuning)
        if tuning:
            _, expects, args = gen_data(shape, dtype, lr, momentum, rho, epsilon)
            return mod, expects, args
        return mod

    # Normal mode: build, launch in-place on the inputs, and compare.
    mod = utils.op_build_test(apply_rms_prop.apply_rms_prop, tensor_shapes, tensor_types,
                              op_attrs=op_attrs, kernel_name="apply_rms_prop", attrs=attrs)
    inputs, expects, args = gen_data(shape, dtype, lr, momentum, rho, epsilon)
    outputs = utils.mod_launch(mod, args, outputs=(0, 1, 2), expect=expects)
    rtol, atol = get_rtol_atol("apply_rms_prop", dtype)
    results = [compare_tensor(out, exp, rtol=rtol, atol=atol) for out, exp in zip(outputs, expects)]
    return inputs, outputs, expects, all(results)
def gen_data(shape, dtype, lr, momentum, rho, epsilon):
    """Generates input, output and expect data."""
    # Tensor state and gradient inputs (ms must be non-negative).
    var = random_gaussian(shape, miu=10, sigma=1.0).astype(dtype)
    ms = np.abs(random_gaussian(shape, miu=4, sigma=0.1).astype(dtype))
    mom = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
    grad = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
    # Scalar hyper-parameters are passed as shape-(1,) tensors.
    lr, momentum, rho = (np.array([v]).astype(dtype) for v in (lr, momentum, rho))
    inputs = [var, ms, mom, grad, lr, momentum, rho]
    expects = apply_rms_prop_compute(var, ms, mom, grad, lr, momentum, rho, epsilon)
    # args is the same object as inputs: the kernel updates them in place.
    return inputs, expects, inputs
def apply_rms_prop_compute(var, ms, mom, grad, lr, momentum, rho, epsilon):
compute_dtype = "float32"
dtype = var.dtype
if dtype != compute_dtype:
var, ms, mom, grad, lr, momentum, rho = [t.astype(compute_dtype) for t in [
var, ms, mom, grad, lr, momentum, rho]]
# ms = rho * ms + (1-rho) * grad * grad
# mom = momentum * mom + lr * grad / sqrt(ms + epsilon)
# var = var - mom
one = np.array([1.0]).astype(compute_dtype)
ms_1 = rho * ms
ms_2 = (one - rho) * grad * grad
ms_update = ms_1 + ms_2
mom_1 = momentum * mom
mom_2_1 = lr * grad
mom_2_2 = one / np.sqrt(ms_update + epsilon)
mom_3 = mom_2_1 * mom_2_2
mom_update = mom_1 + mom_3
var_update = var - mom_update
expects = [var_update, ms_update, mom_update]
if var_update.dtype != dtype:
expects = [t.astype(dtype) for t in expects]
return expects
| [
"1027252281@qq.com"
] | 1027252281@qq.com |
7eb6a5d9af4f57d491515c07e0c11b67f9e29645 | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/model_test.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/model_pruning/python/layers/layers.py | e255ebdd6e43d2e1813249df0d0bd09c09554c6d | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/model_pruning/python/layers/layers.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
5bee5290eb8c83bb54a8d79b006d8dfc6e35367b | 019b885fb971359524943730af2d6b67e6d322d5 | /build/lib/presalytics_story/models/permission_type.py | fdb4ad98c9cf89b36528b372f37176843c1dae6c | [
"MIT"
] | permissive | presalytics/story-python-client | ab9ce85e680dad2ceb70832935cc03318b6f0b3f | 48ac7830b85d65b94a9f6bbfc0c7ee8344327084 | refs/heads/master | 2020-08-04T23:22:34.877485 | 2019-12-27T22:54:30 | 2019-12-27T22:54:30 | 212,312,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,297 | py | # coding: utf-8
"""
    Communications
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PermissionType(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each python attribute to its OpenAPI type string (drives to_dict()).
    openapi_types = {
        'created_at': 'datetime',
        'created_by': 'str',
        'id': 'str',
        'updated_at': 'datetime',
        'updated_by': 'str',
        'can_add_collaborators': 'bool',
        'can_delete': 'bool',
        'can_edit': 'bool',
        'can_view': 'bool',
        'name': 'str'
    }

    # Maps each python attribute to the JSON key used on the wire.
    attribute_map = {
        'created_at': 'created_at',
        'created_by': 'created_by',
        'id': 'id',
        'updated_at': 'updated_at',
        'updated_by': 'updated_by',
        'can_add_collaborators': 'can_add_collaborators',
        'can_delete': 'can_delete',
        'can_edit': 'can_edit',
        'can_view': 'can_view',
        'name': 'name'
    }

    def __init__(self, created_at=None, created_by=None, id=None, updated_at=None, updated_by=None, can_add_collaborators=None, can_delete=None, can_edit=None, can_view=None, name=None):  # noqa: E501
        """PermissionType - a model defined in OpenAPI"""  # noqa: E501

        self._created_at = None
        self._created_by = None
        self._id = None
        self._updated_at = None
        self._updated_by = None
        self._can_add_collaborators = None
        self._can_delete = None
        self._can_edit = None
        self._can_view = None
        self._name = None
        self.discriminator = None

        # Each attribute is only assigned when explicitly provided, so the
        # property setters run only for supplied values.
        if created_at is not None:
            self.created_at = created_at
        if created_by is not None:
            self.created_by = created_by
        if id is not None:
            self.id = id
        if updated_at is not None:
            self.updated_at = updated_at
        if updated_by is not None:
            self.updated_by = updated_by
        if can_add_collaborators is not None:
            self.can_add_collaborators = can_add_collaborators
        if can_delete is not None:
            self.can_delete = can_delete
        if can_edit is not None:
            self.can_edit = can_edit
        if can_view is not None:
            self.can_view = can_view
        if name is not None:
            self.name = name

    @property
    def created_at(self):
        """Gets the created_at of this PermissionType.  # noqa: E501


        :return: The created_at of this PermissionType.  # noqa: E501
        :rtype: datetime
        """
        return self._created_at

    @created_at.setter
    def created_at(self, created_at):
        """Sets the created_at of this PermissionType.


        :param created_at: The created_at of this PermissionType.  # noqa: E501
        :type: datetime
        """

        self._created_at = created_at

    @property
    def created_by(self):
        """Gets the created_by of this PermissionType.  # noqa: E501


        :return: The created_by of this PermissionType.  # noqa: E501
        :rtype: str
        """
        return self._created_by

    @created_by.setter
    def created_by(self, created_by):
        """Sets the created_by of this PermissionType.


        :param created_by: The created_by of this PermissionType.  # noqa: E501
        :type: str
        """

        self._created_by = created_by

    @property
    def id(self):
        """Gets the id of this PermissionType.  # noqa: E501


        :return: The id of this PermissionType.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this PermissionType.


        :param id: The id of this PermissionType.  # noqa: E501
        :type: str
        """

        self._id = id

    @property
    def updated_at(self):
        """Gets the updated_at of this PermissionType.  # noqa: E501


        :return: The updated_at of this PermissionType.  # noqa: E501
        :rtype: datetime
        """
        return self._updated_at

    @updated_at.setter
    def updated_at(self, updated_at):
        """Sets the updated_at of this PermissionType.


        :param updated_at: The updated_at of this PermissionType.  # noqa: E501
        :type: datetime
        """

        self._updated_at = updated_at

    @property
    def updated_by(self):
        """Gets the updated_by of this PermissionType.  # noqa: E501


        :return: The updated_by of this PermissionType.  # noqa: E501
        :rtype: str
        """
        return self._updated_by

    @updated_by.setter
    def updated_by(self, updated_by):
        """Sets the updated_by of this PermissionType.


        :param updated_by: The updated_by of this PermissionType.  # noqa: E501
        :type: str
        """

        self._updated_by = updated_by

    @property
    def can_add_collaborators(self):
        """Gets the can_add_collaborators of this PermissionType.  # noqa: E501


        :return: The can_add_collaborators of this PermissionType.  # noqa: E501
        :rtype: bool
        """
        return self._can_add_collaborators

    @can_add_collaborators.setter
    def can_add_collaborators(self, can_add_collaborators):
        """Sets the can_add_collaborators of this PermissionType.


        :param can_add_collaborators: The can_add_collaborators of this PermissionType.  # noqa: E501
        :type: bool
        """

        self._can_add_collaborators = can_add_collaborators

    @property
    def can_delete(self):
        """Gets the can_delete of this PermissionType.  # noqa: E501


        :return: The can_delete of this PermissionType.  # noqa: E501
        :rtype: bool
        """
        return self._can_delete

    @can_delete.setter
    def can_delete(self, can_delete):
        """Sets the can_delete of this PermissionType.


        :param can_delete: The can_delete of this PermissionType.  # noqa: E501
        :type: bool
        """

        self._can_delete = can_delete

    @property
    def can_edit(self):
        """Gets the can_edit of this PermissionType.  # noqa: E501


        :return: The can_edit of this PermissionType.  # noqa: E501
        :rtype: bool
        """
        return self._can_edit

    @can_edit.setter
    def can_edit(self, can_edit):
        """Sets the can_edit of this PermissionType.


        :param can_edit: The can_edit of this PermissionType.  # noqa: E501
        :type: bool
        """

        self._can_edit = can_edit

    @property
    def can_view(self):
        """Gets the can_view of this PermissionType.  # noqa: E501


        :return: The can_view of this PermissionType.  # noqa: E501
        :rtype: bool
        """
        return self._can_view

    @can_view.setter
    def can_view(self, can_view):
        """Sets the can_view of this PermissionType.


        :param can_view: The can_view of this PermissionType.  # noqa: E501
        :type: bool
        """

        self._can_view = can_view

    @property
    def name(self):
        """Gets the name of this PermissionType.  # noqa: E501


        :return: The name of this PermissionType.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this PermissionType.


        :param name: The name of this PermissionType.  # noqa: E501
        :type: str
        """

        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PermissionType):
            return False

        # Generated models compare by full attribute state.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"kevin@chart-a-lot.com"
] | kevin@chart-a-lot.com |
fd1edb275f814cbcec1af4f8623c8ba04dfae2a3 | ccc81e5a1e6d2be66423e376919deec4b30875ce | /bot_led.py | c32a509bfbc5dc115091008e9e714a3b17fd4151 | [] | no_license | BitcoinOfThings/bot-pi | 04135b43d03430a2fcfe71e71330bd9b6ac2ba87 | 36556fef5f3a4e2916bb8464818ddb6f3c56c1b6 | refs/heads/master | 2020-09-26T08:05:17.102902 | 2019-12-26T22:44:37 | 2019-12-26T22:44:37 | 226,211,976 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | # listens for mqtt messages and controls led connected to rpi
import paho.mqtt.client as mqtt
import RPi.GPIO as GPIO
import json
#
LED = 32
def on_message(client, userdata, message):
    """MQTT callback: parse a JSON payload like {"message": "0"|"1"} and drive the LED."""
    try:
        payload = json.loads(message.payload.decode('utf8'))
        state = int(payload["message"])
        print(state)
        # 0 -> LED off, anything else -> LED on.
        GPIO.output(LED, GPIO.LOW if state == 0 else GPIO.HIGH)
    except Exception as err:
        # Malformed payloads are reported but must not kill the MQTT loop.
        print(err)
# --- one-time hardware and broker setup ---
GPIO.setmode(GPIO.BOARD)  # pin numbers refer to physical header positions
GPIO.setup(LED, GPIO.OUT)
GPIO.output(LED, GPIO.LOW)  # start with the LED off
mqttc = mqtt.Client()
mqttc.username_pw_set(username="demo", password="demo")
mqttc.connect("mqtt.bitcoinofthings.com")
#mqttc.loop_start()
mqttc.on_message = on_message
mqttc.subscribe("demo")  # LED commands arrive on the "demo" topic
try :
    print("Send 0 or 1 to bot_demo to turn the LED on or off")
    mqttc.loop_forever()  # blocks and dispatches incoming messages to on_message
except KeyboardInterrupt:
    pass
#cleanup resets the pin therefore the led will be shut off
GPIO.cleanup()
| [
"dfoderick@gmail.com"
] | dfoderick@gmail.com |
59a353c86bbd81769180b33bf609a5936e95282c | daf28ed3f20340a136f19d582019fbd2554b6ea6 | /python/Ds_phikkpi_cfg.py | bc75d02bbc626a93145b58b803555864b749fd35 | [] | no_license | NiharSaha/Ds_MC_Gen_fragment | ccb4d48a80e46c260da0c7c7e821ad4da12f6a43 | b9bf94cb4f0cf4ffa2619e4671a8c70ca37aaf5d | refs/heads/master | 2022-01-08T10:31:43.544326 | 2018-04-12T19:45:30 | 2018-04-12T19:45:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,729 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
from GeneratorInterface.EvtGenInterface.EvtGenSetting_cff import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(5020.0),
maxEventsToPrint = cms.untracked.int32(0),
ExternalDecays = cms.PSet(
EvtGen130 = cms.untracked.PSet(
decay_table = cms.string('GeneratorInterface/EvtGenInterface/data/DECAY_2010.DEC'),
operates_on_particles = cms.vint32(),
particle_property_file = cms.FileInPath('GeneratorInterface/EvtGenInterface/data/evt.pdl'),
## user_decay_file = cms.vstring('Run2Ana/lambdapkpi/data/lambdaC_kstar892_kpi.dec'),
list_forced_decays = cms.vstring('MyD_s+','MyD_s-'),
user_decay_embedded= cms.vstring(
"""
Alias MyD_s+ D_s+
Alias MyD_s- D_s-
ChargeConj MyD_s- MyD_s+
Alias Myphi phi
Decay MyD_s+
1.000 Myphi pi+ SVS;
Enddecay
CDecay MyD_s-
Decay Myphi
1.000 K+ K- VSS;
Enddecay
End
"""
)
),
parameterSets = cms.vstring('EvtGen130')
),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'HardQCD:all = on',
'PhaseSpace:pTHatMin = 0.', #min pthat
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
generator.PythiaParameters.processParameters.extend(EvtGenExtraParticles)
DsDaufilter = cms.EDFilter("PythiaMomDauFilter",
ParticleID = cms.untracked.int32(431),
MomMinPt = cms.untracked.double(4.),
MomMinEta = cms.untracked.double(-2.4),
MomMaxEta = cms.untracked.double(2.4),
DaughterIDs = cms.untracked.vint32(333, 211),
NumberDaughters = cms.untracked.int32(2),
DaughterID = cms.untracked.int32(333),
DescendantsIDs = cms.untracked.vint32(321 , -321),
NumberDescendants = cms.untracked.int32(2),
)
Dsrapidityfilter = cms.EDFilter("PythiaFilter",
ParticleID = cms.untracked.int32(431),
MinPt = cms.untracked.double(4.),
MaxPt = cms.untracked.double(500.),
MinRapidity = cms.untracked.double(-1.2),
MaxRapidity = cms.untracked.double(1.2),
)
ProductionFilterSequence = cms.Sequence(generator*DsDaufilter*Dsrapidityfilter)
| [
"peng43@purdue.edu"
] | peng43@purdue.edu |
b1d5d7cd5f9f1fb3404761cf7c3fd4aae7a133e5 | a3fddbf8d953bce9b84173c1ba48780e849f86ef | /dave/to_nxspe.py | 63548c2bb5691e8e29ada6a7bd7e8c15082e47b9 | [] | no_license | rosswhitfield/wand | 79f99bef519ed9c334fddcb5396ab66d56f2903e | 562b1f89acb46749e220081117e2cbda2014df36 | refs/heads/master | 2021-06-02T05:38:00.741277 | 2021-04-14T13:19:18 | 2021-04-14T13:19:18 | 97,755,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | from mantid.simpleapi import *
import numpy as np
import sys
# Fixed incident energy passed to ConvertUnits/SaveNXSPE as EFixed
# (Mantid conventionally takes this in meV -- TODO confirm).
E=36.9384
# Two-point wavelength axis (Angstrom, presumably) assigned to every
# histogram before normalisation -- TODO confirm units.
w = np.array([1.487,1.489])
# Processed, grouped vanadium run used for detector grouping/normalisation.
van=LoadNexus('/SNS/users/rwp/wand/HB2C_2933_Van_processed_grouped.nxs')
for run in range(2952,4754):
    ws = LoadEventNexus(Filename='/HFIR/HB2C/IPTS-7776/nexus/HB2C_{}.nxs.h5'.format(run))
    ws = Integration(ws)
    # Mask the raw detector list, then regroup following the vanadium grouping.
    MaskDetectors(ws,DetectorList=range(16384))
    ws = GroupDetectors(ws,CopyGroupingFromWorkspace=van)
    # Re-label the X axis as wavelength and assign the fixed two-point axis.
    ws.getAxis(0).setUnit("Wavelength")
    # xrange: this script targets Mantid's Python 2 interpreter.
    for idx in xrange(ws.getNumberHistograms()):
        ws.setX(idx, w)
    # Normalise by the vanadium workspace and by integrated proton charge.
    ws=ws/van/ws.getRun().getProtonCharge()
    # Convert to energy transfer, rebin, and write one .nxspe per run;
    # Psi is taken from the HB2C:Mot:s1 sample-rotation log.
    ws=ConvertUnits(ws, Target='DeltaE', EMode='Direct', EFixed=E)
    ws=Rebin(ws, Params='-0.1,0.2,0.3')
    SaveNXSPE(ws, Filename='/HFIR/HB2C/IPTS-7776/shared/rwp/nxspe_group/HB2C_{}.nxspe'.format(run), Efixed=E, Psi=ws.getRun().getLogData('HB2C:Mot:s1').value[0])
| [
"whitfieldre@ornl.gov"
] | whitfieldre@ornl.gov |
486d98111acd3519a084b097828552d3cef1702d | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/12197021.py | af80b709441910822e79a0e64731bb6622b81026 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/12197021.py generated: Wed, 25 Jan 2017 15:25:28
#
# Event Type: 12197021
#
# ASCII decay Descriptor: [B+ -> D*(2010)+ anti-D0 K*0]cc with D* forced to (D0->Kpi) pi+ and K* into Kpi
#
# NOTE: auto-generated DecFiles options (see file header); edits may be overwritten.
from Configurables import Generation
Generation().EventType = 12197021
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
# EvtGen performs the signal decay according to the user decay file below.
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_DstD0Kst0,Kpi,Kpi=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
# Signal PIDs 521 / -521: the [B+]cc pair of the descriptor above.
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
# Gun kinematics are drawn from a (pt, eta)-binned spectrum histogram (h_pteta).
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
# Smear the production vertex according to the beam-spot model.
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12197021
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
c84343b041f36369f7566eeaad96750410cb5505 | e5f5b70dc5ef5ee52d897845e68a3d6feef2d17b | /chart/bar.py | 204f6ab486c0cc317318bb2bb047d9ff9ffb7a34 | [
"MIT"
] | permissive | maxhumber/chart | e5b16bffac63b29f814d7778253da3e721bc013e | 8f19609c14dedc09b07e4682c703107d53cd9714 | refs/heads/master | 2020-07-01T21:29:18.914053 | 2020-04-29T18:00:36 | 2020-04-29T18:00:36 | 201,306,637 | 63 | 8 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | # HACK: to enable interactive development in Atom/Hydrogen
try:
from .preprocessing import RangeScaler
except ModuleNotFoundError:
from chart.preprocessing import RangeScaler
def create_label(label, label_width):
    '''Truncate/right-align a label to *label_width* chars and append ': '.

    Labels longer than *label_width* are truncated; shorter ones are
    left-padded with spaces (str.rjust), so the trailing ':' of every
    label lines up in the same column.  (The original docstring said
    "right padding", which is the opposite of what rjust does.)

    >>> create_label('abc', 5)
    '  abc: '
    '''
    return label[:label_width].rjust(label_width) + ': '
def build_row(value, label, width, mark):
    '''Render one chart row: the label, then *value* marks padded out to *width*.'''
    bar_cells = (mark * value).ljust(width)
    return label + bar_cells
def bar(x, y, width=30, label_width=None, mark='▇'):
    '''A simple bar chart that prints to the console
    :param x: list, array or series of numeric values
    :param y: list, array or series of labels for the numeric values
    :param width: integer for the character length of the x values
    :param label_width: integer for the label character length
    :param mark: unicode symbol to mark data values
    >>> from chart import bar
    >>> x = [500, 200, 900, 400]
    >>> y = ['marc', 'mummify', 'chart', 'sausagelink']
    >>> bar(x, y)
           marc: ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇
        mummify: ▇▇▇▇▇▇▇
          chart: ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇
    sausagelink: ▇▇▇▇▇▇▇▇▇▇▇▇▇
    >>> import pandas as pd
    >>> df = pd.DataFrame({
        'artist': ['Tame Impala', 'Childish Gambino', 'The Knocks'],
        'listens': [8_456_831, 18_185_245, 2_556_448]
    })
    >>> bar(df.listens, df.artist, width=20, label_width=11, mark='🔊')
    Tame Impala: 🔊🔊🔊🔊🔊🔊🔊🔊🔊
    Childish Ga: 🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊🔊
     The Knocks: 🔊🔊🔊
    '''
    # Default the label column to the widest label so every bar lines up.
    # NOTE(review): raises ValueError on empty y (max of empty sequence).
    if not label_width:
        label_width = max([len(l) for l in y])
    labels = [create_label(l, label_width) for l in y]
    # RangeScaler maps x into 0..width character cells; the second argument
    # is presumably a floor/rounding setting -- confirm in chart.preprocessing.
    values = RangeScaler((0, width), 0).fit_transform(x)
    # Assemble the whole chart as one string, one row per (value, label) pair.
    string_chart = ''
    for value, label in zip(values, labels):
        string_row = build_row(value, label, width, mark)
        string_chart += string_row
        string_chart += '\n'
    print(string_chart)
| [
"max.humber@gmail.com"
] | max.humber@gmail.com |
ce1e831f63a122a4a600268f2c8e2738f7e4329c | 589363013048ea3962bdc1e8d7b0cb74481e4dbd | /2022/boj11723.py | 02631accea1f0fd66cded1a0f3b65c1dcd0b4509 | [] | no_license | zjvlzld/algoritm | 53b97698a6b4385a0636bb076514a9dd458c8adf | 4339aee51e46ee2fbf3d40bc97fb7fdb51447f50 | refs/heads/master | 2022-08-31T04:48:51.537065 | 2022-08-18T23:59:45 | 2022-08-18T23:59:45 | 178,524,683 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | import sys
T=int(input())
S=[-1 for _ in range(21)]
for _ in range(T):
get=sys.stdin.readline().rstrip().split(" ")
if(get[0]=='add'):
S[int(get[1])]=1
continue
elif(get[0]=='remove'):
S[int(get[1])]=-1
continue
elif(get[0]=='check'):
if S[int(get[1])]==1:
print(1)
else:
print(0)
continue
elif(get[0]=='toggle'):
S[int(get[1])]*=-1
continue
elif(get[0]=='all'):
S=[1 for _ in range(21)]
continue
elif(get[0]=='empty'):
S=[-1 for _ in range(21)]
continue | [
"jongsik1995@naver.com"
] | jongsik1995@naver.com |
fb5177041f67fbb172219a9d83558812f3e4e953 | d91f28f9b8882af0f4d36a43775e6ac35a8716f0 | /payroll/urls.py | 667371a3d2f962125bf46ef77fe054877239163f | [] | no_license | shaddysparks/web_system | 9ea74159ee74435a44d5c560b8a3cb0594cb22bf | 1c854a9a2566c51fbfcef2d7f1f9fbbb778c7711 | refs/heads/master | 2020-05-02T11:46:35.998041 | 2019-03-20T10:03:33 | 2019-03-20T10:03:33 | 177,938,855 | 1 | 0 | null | 2019-03-27T07:15:52 | 2019-03-27T07:15:52 | null | UTF-8 | Python | false | false | 131 | py | from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('payroll:index').
app_name = 'payroll'
urlpatterns = [
    # App root -> views.index
    path('', views.index, name='index'),
]
| [
"kyezaarnold63@gmail.com"
] | kyezaarnold63@gmail.com |
32c73283b5ac44a971e562d676c636b5d8f59a2b | 99b84337ae66ad2877544fd158f20e7f4cd96520 | /day11-20/day17/TCP(老师版)/TCPServer.py | be0ecc868dd22962f21507fc15cd062ce46e03e6 | [] | no_license | jiajiabin/python_study | cf145d54cabce2cb98914b3448ed7d0e5c1c146c | b4faaff26ee9728af2e80942ba6a7c7f6a8b0f86 | refs/heads/master | 2020-06-21T21:31:26.034978 | 2019-08-26T11:39:34 | 2019-08-26T11:39:34 | 197,556,254 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | import socket
# Server side of a minimal TCP demo.
# 1. Configure the socket: pick TCP as the transport, then bind IP and port.
#    AF_INET      -> IPv4 addressing
#    AF_INET6     -> IPv6 addressing
#    SOCK_STREAM  -> TCP
#    SOCK_DGRAM   -> UDP
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(("192.168.53.133", 8923))
# A host runs many programs/processes; the port number is what routes
# incoming traffic to this particular process.
# 2. Listen for client connection requests; the argument is the backlog
#    (maximum number of queued pending connections).
server_socket.listen(3)
# accept() below blocks while there is no connection and resumes as soon
# as a client connects.
while True:
    # 3. Establish the connection; returns a per-client socket plus the
    #    client's (IP, port) address.
    client_socket, addr = server_socket.accept()
    # Receive data sent by the client (buffer of 1024 bytes).
    data = client_socket.recv(1024)
    # Keep the buffer size <= 4096 and normally a power of two; otherwise
    # multi-byte text (e.g. Chinese) may be split and decode garbled.
    print(data.decode("utf-8"))
    # The wire carries bytes: decode incoming data before use, and encode
    # outgoing strings before sending.
    client_socket.send("你也好腻害!!".encode("utf-8"))
| [
"2592668397@qq.com"
] | 2592668397@qq.com |
c7c165068d12252bd5b6e7058d0ef6e1db1d6bff | 06f7ffdae684ac3cc258c45c3daabce98243f64f | /vsts/vsts/licensing/v4_0/models/account_license_usage.py | 4fa792ae3d93eb20672766f3fe8c850211691444 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown"
] | permissive | kenkuo/azure-devops-python-api | 7dbfb35f1c9637c9db10207824dd535c4d6861e8 | 9ac38a97a06ee9e0ee56530de170154f6ed39c98 | refs/heads/master | 2020-04-03T17:47:29.526104 | 2018-10-25T17:46:09 | 2018-10-25T17:46:09 | 155,459,045 | 0 | 0 | MIT | 2018-10-30T21:32:43 | 2018-10-30T21:32:42 | null | UTF-8 | Python | false | false | 1,370 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AccountLicenseUsage(Model):
    """AccountLicenseUsage.
    :param license:
    :type license: :class:`AccountUserLicense <licensing.v4_0.models.AccountUserLicense>`
    :param provisioned_count:
    :type provisioned_count: int
    :param used_count:
    :type used_count: int
    """
    # msrest (de)serialization map: attribute name -> wire key and type name.
    # NOTE: generated code (see file header); changes may be overwritten.
    _attribute_map = {
        'license': {'key': 'license', 'type': 'AccountUserLicense'},
        'provisioned_count': {'key': 'provisionedCount', 'type': 'int'},
        'used_count': {'key': 'usedCount', 'type': 'int'}
    }
    # NOTE(review): the 'license' parameter shadows the builtin of the same
    # name; kept as-is for API compatibility with the generated model.
    def __init__(self, license=None, provisioned_count=None, used_count=None):
        super(AccountLicenseUsage, self).__init__()
        self.license = license
        self.provisioned_count = provisioned_count
        self.used_count = used_count
| [
"tedchamb@microsoft.com"
] | tedchamb@microsoft.com |
1ed1155b1470647513721c29c89919c8d698fef0 | dbec98678f62786fabf0dd32f7e1a464b67cca7f | /basic/migrations/0020_auto_20171024_1407.py | eb0ed79bd18802528ff3972927d104fced4861fa | [
"BSD-2-Clause"
] | permissive | kgdunn/django-peer-review-system | afc30052f37584e6db90a361d8d2c7613c7e500e | 8d013961e00d189fbbade5283128e956a27954f8 | refs/heads/master | 2023-07-07T06:39:00.357200 | 2020-03-20T11:49:25 | 2020-03-20T11:49:25 | 244,141,048 | 0 | 0 | BSD-2-Clause | 2023-08-28T17:25:13 | 2020-03-01T12:00:19 | Python | UTF-8 | Python | false | false | 505 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-24 12:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (makemigrations) migration: re-declares the
    # Person.last_lis field definition via AlterField.
    dependencies = [
        ('basic', '0019_email_task'),
    ]
    operations = [
        migrations.AlterField(
            model_name='person',
            name='last_lis',
            field=models.CharField(blank=True, max_length=200, verbose_name='Last known: lis_result_sourcedid'),
        ),
    ]
| [
"kgdunn@gmail.com"
] | kgdunn@gmail.com |
f4447eb60eec73954e51b7efa70ffa9f18869a7a | 24a291e5eb298b7c2b4f1105d789ac488457b59c | /Python_Basics/python06_16_DataTypeEx0_김민교.py | 539d13510fc578ff955549ac1e7e68e6f860fb61 | [] | no_license | gmrdns03/Python-Introductory-Course_Minkyo | da3afff502ed44f178d5b3885fbb1b01249ad1de | ef0d4e16aee3dba6a4a10c422ef68b1465745833 | refs/heads/main | 2023-05-29T16:08:31.814542 | 2021-06-23T13:32:14 | 2021-06-23T13:32:14 | 379,300,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py |
vl01 = 10
vl02 = 20
# id() returns each object's identity (in CPython, its memory address);
# the Korean block below walks through the two printed values.
print(id(vl01))
print(id(vl02))
'''
1530162480 == vl01이라는 변수에 10이 저장되어 있는 주소값
1530162800 == vl02이라는 변수에 20이 저장되어 있는 주소값
id 함수는 변수가 가리키고 있는 객체의 주소 값을 돌려주는 파이썬 내장 함수이다.
즉 여기에서 필자가 만든 변수 vl01이 가리키는 리스트의 주소 값은 1530162480 임을 알 수 있다.
''' | [
"noreply@github.com"
] | gmrdns03.noreply@github.com |
47c2d6a4851473c5dd8779a58f3fbb002659da78 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/inspur/sm/plugins/modules/self_test_info.py | a5b0be963a3356086489952a29788583ec4b1504 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 2,215 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright (C) 2020 Inspur Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: self_test_info
version_added: "0.1.0"
author:
- WangBaoshan (@ISIB-group)
short_description: Get self test information.
description:
- Get self test information on Inspur server.
options: {}
extends_documentation_fragment:
- inspur.sm.ism
'''
EXAMPLES = '''
- name: self test
hosts: ism
connection: local
gather_facts: no
vars:
ism:
host: "{{ ansible_ssh_host }}"
username: "{{ username }}"
password: "{{ password }}"
tasks:
- name: "Get self test information"
inspur.sm.self_test_info:
provider: "{{ ism }}"
'''
RETURN = '''
message:
description: Messages returned after module execution.
returned: always
type: str
state:
description: Status after module execution.
returned: always
type: str
changed:
description: Check to see if a change was made on the device.
returned: always
type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.inspur.sm.plugins.module_utils.ism import (ism_argument_spec, get_connection)
class Test(object):
    """Wraps an AnsibleModule and runs the ISM 'getselftest' query for it."""
    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()
        self.results = dict()
    def init_module(self):
        """Init module object (check mode is supported; the module only reads)."""
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)
    def run_command(self):
        """Force the 'getselftest' subcommand and fetch results via the shared ISM helper."""
        self.module.params['subcommand'] = 'getselftest'
        self.results = get_connection(self.module)
    def show_result(self):
        """Show result: report the collected results back to Ansible."""
        self.module.exit_json(**self.results)
    def work(self):
        """Worker: run the query, then emit the result."""
        self.run_command()
        self.show_result()
def main():
    """Module entry point: build the shared ISM argument spec and run the self-test query."""
    argument_spec = dict()
    argument_spec.update(ism_argument_spec)
    test_obj = Test(argument_spec)
    test_obj.work()
if __name__ == '__main__':
main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
a9b71fb35cb9b4697e21386b2dfe9d822a2ea36d | 29e4d393351c87741f069092eb8d0ab6f1221d6f | /venv/lib/python3.6/site-packages/Crypto/SelfTest/Cipher/common.py | 1e0bd39b12a294f29a22d8089d838bce068bfaf9 | [
"MIT"
] | permissive | masora1030/eigoyurusan | f0eb7d9761aa150379b558c13fc2477daf504417 | fa82044a2dc2f0f1f7454f5394e6d68fa923c289 | refs/heads/master | 2022-12-01T09:31:17.330620 | 2020-07-22T14:51:59 | 2020-07-22T14:51:59 | 279,682,018 | 11 | 2 | MIT | 2020-07-22T22:02:57 | 2020-07-14T20:03:45 | Python | UTF-8 | Python | false | false | 16,996 | py | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/common.py: Common code for Crypto.SelfTest.Hash
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-testing for PyCrypto hash modules"""
import unittest
from binascii import a2b_hex, b2a_hex, hexlify
from Crypto.Util.py3compat import b, _memoryview
from Crypto.Util.strxor import strxor_c
class _NoDefault: pass # sentinel object
def _extract(d, k, default=_NoDefault):
"""Get an item from a dictionary, and remove it from the dictionary."""
try:
retval = d[k]
except KeyError:
if default is _NoDefault:
raise
return default
del d[k]
return retval
# Generic cipher test case
class CipherSelfTest(unittest.TestCase):
    """Known-answer test for one (key, plaintext, ciphertext) vector.

    All vector fields arrive hex-encoded in ``params``; anything this
    constructor does not consume is forwarded verbatim as keyword
    arguments to ``module.new()`` (see _new()).
    """
    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module
        # Extract the parameters (hex-encoded strings at this point)
        params = params.copy()
        self.description = _extract(params, 'description')
        self.key = b(_extract(params, 'key'))
        self.plaintext = b(_extract(params, 'plaintext'))
        self.ciphertext = b(_extract(params, 'ciphertext'))
        self.module_name = _extract(params, 'module_name', None)
        self.assoc_data = _extract(params, 'assoc_data', None)
        self.mac = _extract(params, 'mac', None)
        if self.assoc_data:
            self.mac = b(self.mac)
        mode = _extract(params, 'mode', None)
        self.mode_name = str(mode)
        if mode is not None:
            # Block cipher: resolve e.g. "CBC" to the module's MODE_CBC constant
            self.mode = getattr(self.module, "MODE_" + mode)
            # 'iv' and 'nonce' are accepted as synonyms for the same slot
            self.iv = _extract(params, 'iv', None)
            if self.iv is None:
                self.iv = _extract(params, 'nonce', None)
            if self.iv is not None:
                self.iv = b(self.iv)
        else:
            # Stream cipher: no mode constant, but an IV may still be given
            self.mode = None
            self.iv = _extract(params, 'iv', None)
            if self.iv is not None:
                self.iv = b(self.iv)
        # Leftover params are mode-specific extras for module.new()
        self.extra_params = params
    def shortDescription(self):
        return self.description
    def _new(self):
        """Instantiate a fresh cipher object from the stored parameters."""
        params = self.extra_params.copy()
        key = a2b_hex(self.key)
        # Positional (old-style) arguments: optional mode, then optional IV
        old_style = []
        if self.mode is not None:
            old_style = [ self.mode ]
        if self.iv is not None:
            old_style += [ a2b_hex(self.iv) ]
        return self.module.new(key, *old_style, **params)
    def isMode(self, name):
        """Return True if this vector runs in the mode named *name* (e.g. "CBC")."""
        if not hasattr(self.module, "MODE_"+name):
            return False
        return self.mode == getattr(self.module, "MODE_"+name)
    def runTest(self):
        plaintext = a2b_hex(self.plaintext)
        ciphertext = a2b_hex(self.ciphertext)
        assoc_data = []
        if self.assoc_data:
            assoc_data = [ a2b_hex(b(x)) for x in self.assoc_data]
        ct = None
        pt = None
        #
        # Repeat the same encryption or decryption twice and verify
        # that the result is always the same
        #
        for i in range(2):
            cipher = self._new()
            decipher = self._new()
            # Only AEAD modes take associated data via update()
            for comp in assoc_data:
                cipher.update(comp)
                decipher.update(comp)
            ctX = b2a_hex(cipher.encrypt(plaintext))
            ptX = b2a_hex(decipher.decrypt(ciphertext))
            if ct:
                # Second pass must reproduce the first pass exactly
                self.assertEqual(ct, ctX)
                self.assertEqual(pt, ptX)
            ct, pt = ctX, ptX
        self.assertEqual(self.ciphertext, ct) # encrypt
        self.assertEqual(self.plaintext, pt) # decrypt
        if self.mac:
            # AEAD vectors also pin the authentication tag
            mac = b2a_hex(cipher.digest())
            self.assertEqual(self.mac, mac)
            decipher.verify(a2b_hex(self.mac))
class CipherStreamingSelfTest(CipherSelfTest):
    """Re-run a vector feeding data in 3-byte slices: output must match the
    one-shot result, i.e. the cipher must be usable as a streaming cipher."""
    def shortDescription(self):
        desc = self.module_name
        if self.mode is not None:
            desc += " in %s mode" % (self.mode_name,)
        return "%s should behave like a stream cipher" % (desc,)
    def runTest(self):
        plaintext = a2b_hex(self.plaintext)
        ciphertext = a2b_hex(self.ciphertext)
        # The cipher should work like a stream cipher
        # Test counter mode encryption, 3 bytes at a time
        ct3 = []
        cipher = self._new()
        for i in range(0, len(plaintext), 3):
            ct3.append(cipher.encrypt(plaintext[i:i+3]))
        ct3 = b2a_hex(b("").join(ct3))
        self.assertEqual(self.ciphertext, ct3) # encryption (3 bytes at a time)
        # Test counter mode decryption, 3 bytes at a time.
        # Note this calls encrypt() on the ciphertext: the assertion below
        # relies on the keystream being symmetric, so encrypting the
        # ciphertext yields the plaintext.
        pt3 = []
        cipher = self._new()
        for i in range(0, len(ciphertext), 3):
            pt3.append(cipher.encrypt(ciphertext[i:i+3]))
        # PY3K: This is meant to be text, do not change to bytes (data)
        pt3 = b2a_hex(b("").join(pt3))
        self.assertEqual(self.plaintext, pt3) # decryption (3 bytes at a time)
class RoundtripTest(unittest.TestCase):
    """Sanity check: an ECB encrypt-then-decrypt round trip must restore the plaintext."""
    def __init__(self, module, params):
        from Crypto import Random
        super(RoundtripTest, self).__init__()
        self.module = module
        self.iv = Random.get_random_bytes(module.block_size)
        self.key = b(params['key'])
        # Repeat the vector's plaintext to get a multi-block message.
        self.plaintext = 100 * b(params['plaintext'])
        self.module_name = params.get('module_name', None)
    def shortDescription(self):
        return """%s .decrypt() output of .encrypt() should not be garbled""" % (self.module_name,)
    def runTest(self):
        # ECB needs no IV, making it the simplest mode for a round trip.
        mode = self.module.MODE_ECB
        encryptor = self.module.new(a2b_hex(self.key), mode)
        decryptor = self.module.new(a2b_hex(self.key), mode)
        recovered = decryptor.decrypt(encryptor.encrypt(self.plaintext))
        self.assertEqual(self.plaintext, recovered)
class IVLengthTest(unittest.TestCase):
    """Passing an IV to a mode that takes none (ECB) must raise TypeError."""
    def __init__(self, module, params):
        super(IVLengthTest, self).__init__()
        self.module = module
        self.key = b(params['key'])
    def shortDescription(self):
        return "Check that all modes except MODE_ECB and MODE_CTR require an IV of the proper length"
    def runTest(self):
        # ECB takes no IV at all, so supplying one (even empty) is an error.
        with self.assertRaises(TypeError):
            self.module.new(a2b_hex(self.key), self.module.MODE_ECB, b(""))
    def _dummy_counter(self):
        # Helper for counter-mode variants: an all-zero counter block.
        return "\0" * self.module.block_size
class NoDefaultECBTest(unittest.TestCase):
    """new() without an explicit mode must fail rather than silently default to ECB."""
    def __init__(self, module, params):
        super(NoDefaultECBTest, self).__init__()
        self.module = module
        self.key = b(params['key'])
    def runTest(self):
        with self.assertRaises(TypeError):
            self.module.new(a2b_hex(self.key))
class ByteArrayTest(unittest.TestCase):
    """Verify we can use bytearray's for encrypting and decrypting.

    Parameter handling mirrors CipherSelfTest.__init__; the difference is
    that runTest() feeds bytearray objects (not bytes) to the cipher,
    exercising the buffer-protocol code path.
    """
    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module
        # Extract the parameters (see CipherSelfTest for the full contract)
        params = params.copy()
        self.description = _extract(params, 'description')
        self.key = b(_extract(params, 'key'))
        self.plaintext = b(_extract(params, 'plaintext'))
        self.ciphertext = b(_extract(params, 'ciphertext'))
        self.module_name = _extract(params, 'module_name', None)
        self.assoc_data = _extract(params, 'assoc_data', None)
        self.mac = _extract(params, 'mac', None)
        if self.assoc_data:
            self.mac = b(self.mac)
        mode = _extract(params, 'mode', None)
        self.mode_name = str(mode)
        if mode is not None:
            # Block cipher
            self.mode = getattr(self.module, "MODE_" + mode)
            self.iv = _extract(params, 'iv', None)
            if self.iv is None:
                self.iv = _extract(params, 'nonce', None)
            if self.iv is not None:
                self.iv = b(self.iv)
        else:
            # Stream cipher
            self.mode = None
            self.iv = _extract(params, 'iv', None)
            if self.iv is not None:
                self.iv = b(self.iv)
        self.extra_params = params
    def _new(self):
        """Instantiate a fresh cipher object (same logic as CipherSelfTest._new)."""
        params = self.extra_params.copy()
        key = a2b_hex(self.key)
        old_style = []
        if self.mode is not None:
            old_style = [ self.mode ]
        if self.iv is not None:
            old_style += [ a2b_hex(self.iv) ]
        return self.module.new(key, *old_style, **params)
    def runTest(self):
        plaintext = a2b_hex(self.plaintext)
        ciphertext = a2b_hex(self.ciphertext)
        assoc_data = []
        if self.assoc_data:
            # Associated data is wrapped in bytearray too
            assoc_data = [ bytearray(a2b_hex(b(x))) for x in self.assoc_data]
        cipher = self._new()
        decipher = self._new()
        # Only AEAD modes take associated data
        for comp in assoc_data:
            cipher.update(comp)
            decipher.update(comp)
        # Inputs are wrapped in bytearray to exercise the buffer API
        ct = b2a_hex(cipher.encrypt(bytearray(plaintext)))
        pt = b2a_hex(decipher.decrypt(bytearray(ciphertext)))
        self.assertEqual(self.ciphertext, ct) # encrypt
        self.assertEqual(self.plaintext, pt) # decrypt
        if self.mac:
            mac = b2a_hex(cipher.digest())
            self.assertEqual(self.mac, mac)
            decipher.verify(bytearray(a2b_hex(self.mac)))
class MemoryviewTest(unittest.TestCase):
    """Verify we can use memoryviews for encrypting and decrypting.

    Parameter handling mirrors CipherSelfTest.__init__; the difference is
    that runTest() feeds memoryview objects (zero-copy views, not bytes)
    to the cipher.
    """
    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module
        # Extract the parameters (see CipherSelfTest for the full contract)
        params = params.copy()
        self.description = _extract(params, 'description')
        self.key = b(_extract(params, 'key'))
        self.plaintext = b(_extract(params, 'plaintext'))
        self.ciphertext = b(_extract(params, 'ciphertext'))
        self.module_name = _extract(params, 'module_name', None)
        self.assoc_data = _extract(params, 'assoc_data', None)
        self.mac = _extract(params, 'mac', None)
        if self.assoc_data:
            self.mac = b(self.mac)
        mode = _extract(params, 'mode', None)
        self.mode_name = str(mode)
        if mode is not None:
            # Block cipher
            self.mode = getattr(self.module, "MODE_" + mode)
            self.iv = _extract(params, 'iv', None)
            if self.iv is None:
                self.iv = _extract(params, 'nonce', None)
            if self.iv is not None:
                self.iv = b(self.iv)
        else:
            # Stream cipher
            self.mode = None
            self.iv = _extract(params, 'iv', None)
            if self.iv is not None:
                self.iv = b(self.iv)
        self.extra_params = params
    def _new(self):
        """Instantiate a fresh cipher object (same logic as CipherSelfTest._new)."""
        params = self.extra_params.copy()
        key = a2b_hex(self.key)
        old_style = []
        if self.mode is not None:
            old_style = [ self.mode ]
        if self.iv is not None:
            old_style += [ a2b_hex(self.iv) ]
        return self.module.new(key, *old_style, **params)
    def runTest(self):
        plaintext = a2b_hex(self.plaintext)
        ciphertext = a2b_hex(self.ciphertext)
        assoc_data = []
        if self.assoc_data:
            # Associated data is wrapped in memoryview too
            assoc_data = [ memoryview(a2b_hex(b(x))) for x in self.assoc_data]
        cipher = self._new()
        decipher = self._new()
        # Only AEAD modes take associated data
        for comp in assoc_data:
            cipher.update(comp)
            decipher.update(comp)
        # Inputs are wrapped in memoryview to exercise the buffer API
        ct = b2a_hex(cipher.encrypt(memoryview(plaintext)))
        pt = b2a_hex(decipher.decrypt(memoryview(ciphertext)))
        self.assertEqual(self.ciphertext, ct) # encrypt
        self.assertEqual(self.plaintext, pt) # decrypt
        if self.mac:
            mac = b2a_hex(cipher.digest())
            self.assertEqual(self.mac, mac)
            decipher.verify(memoryview(a2b_hex(self.mac)))
def make_block_tests(module, module_name, test_data, additional_params=None):
    """Build the list of TestCase instances for a block cipher module.

    Each row of *test_data* is a tuple of hex-encoded strings:
        (plaintext, ciphertext, key[, description[, extra_params]])
    The mode defaults to ECB.  The generic extra tests (round trip,
    IV length, no-default-ECB, bytearray) are added once, in front of
    the first per-vector test.

    :param additional_params: optional dict merged into every row's params.
        The default was changed from a shared mutable ``dict()`` literal to
        ``None`` to avoid the mutable-default-argument pitfall; behavior
        is unchanged for all callers.
    """
    if additional_params is None:
        additional_params = {}
    tests = []
    extra_tests_added = False
    for i, row in enumerate(test_data):
        # Build the "params" dictionary with:
        # - plaintext, ciphertext, key
        # - mode (default is ECB)
        # - (optionally) description and any mode-specific extras
        params = {}
        if len(row) == 3:
            (params['plaintext'], params['ciphertext'], params['key']) = row
        elif len(row) == 4:
            (params['plaintext'], params['ciphertext'], params['key'],
             params['description']) = row
        elif len(row) == 5:
            (params['plaintext'], params['ciphertext'], params['key'],
             params['description'], extra_params) = row
            params.update(extra_params)
        else:
            raise AssertionError("Unsupported tuple size %d" % (len(row),))
        if "mode" not in params:
            params["mode"] = "ECB"
        # Build the display-name for the test.  _extract() also REMOVES the
        # consumed keys from p2, so the %r fallback below only shows the
        # mode-specific leftovers (p_ciphertext/p_mode are popped for that
        # side effect even though their values are unused).
        p2 = params.copy()
        p_key = _extract(p2, 'key')
        p_plaintext = _extract(p2, 'plaintext')
        p_ciphertext = _extract(p2, 'ciphertext')
        p_mode = _extract(p2, 'mode')
        p_description = _extract(p2, 'description', None)
        if p_description is not None:
            description = p_description
        elif p_mode == 'ECB' and not p2:
            description = "p=%s, k=%s" % (p_plaintext, p_key)
        else:
            description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2)
        name = "%s #%d: %s" % (module_name, i + 1, description)
        params['description'] = name
        params['module_name'] = module_name
        params.update(additional_params)
        # Add the generic test(s) to the suite once, before the first test
        if not extra_tests_added:
            tests += [
                RoundtripTest(module, params),
                IVLengthTest(module, params),
                NoDefaultECBTest(module, params),
                ByteArrayTest(module, params),
            ]
            extra_tests_added = True
        # Add the current test to the test suite
        tests.append(CipherSelfTest(module, params))
    return tests
def make_stream_tests(module, module_name, test_data):
    """Build the list of TestCase instances for a stream cipher module.

    Each row of *test_data* is a tuple of hex-encoded strings:
        (plaintext, ciphertext, key[, description[, extra_params]])
    Generic bytearray/memoryview tests are added once, in front of the
    first per-vector test; each vector then gets both a one-shot and a
    streaming (3-bytes-at-a-time) known-answer test.
    """
    tests = []
    extra_tests_added = False
    for i, row in enumerate(test_data):
        # Build the "params" dictionary
        params = {}
        if len(row) == 3:
            (params['plaintext'], params['ciphertext'], params['key']) = row
        elif len(row) == 4:
            (params['plaintext'], params['ciphertext'], params['key'],
             params['description']) = row
        elif len(row) == 5:
            (params['plaintext'], params['ciphertext'], params['key'],
             params['description'], extra_params) = row
            params.update(extra_params)
        else:
            raise AssertionError("Unsupported tuple size %d" % (len(row),))
        # Build the display-name for the test.  _extract() also removes the
        # consumed keys from p2, so the %r fallback only shows leftovers
        # (p_ciphertext is popped for that side effect; its value is unused).
        p2 = params.copy()
        p_key = _extract(p2, 'key')
        p_plaintext = _extract(p2, 'plaintext')
        p_ciphertext = _extract(p2, 'ciphertext')
        p_description = _extract(p2, 'description', None)
        if p_description is not None:
            description = p_description
        elif not p2:
            description = "p=%s, k=%s" % (p_plaintext, p_key)
        else:
            description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2)
        name = "%s #%d: %s" % (module_name, i + 1, description)
        params['description'] = name
        params['module_name'] = module_name
        # Add the generic test(s) to the suite once, before the first test
        if not extra_tests_added:
            tests += [
                ByteArrayTest(module, params),
            ]
            import sys
            # Skip the memoryview test on Python 2.6 (presumably due to
            # memoryview limitations there).  Use version_info rather than
            # slicing the version string: sys.version[:3] is fragile.
            if sys.version_info[:2] != (2, 6):
                tests.append(MemoryviewTest(module, params))
            extra_tests_added = True
        # Add the current test to the test suite
        tests.append(CipherSelfTest(module, params))
        tests.append(CipherStreamingSelfTest(module, params))
    return tests
# vim:set ts=4 sw=4 sts=4 expandtab:
| [
"soraemonpockt@icloud.com"
] | soraemonpockt@icloud.com |
4bec9cee9ab3ceccad438dbfcd99bb203b1eb59c | 6b8c3974d3ce5f7841e51dcb406666c0c5d92155 | /heat/heat/tests/neutron/test_neutron_network_gateway.py | 66ae2180b7545a572fcff56648dd482313effa70 | [
"Apache-2.0"
] | permissive | swjang/cloudexchange | bbbf78a2e7444c1070a55378092c17e8ecb27059 | c06ed54f38daeff23166fb0940b27df74c70fc3e | refs/heads/master | 2020-12-29T03:18:43.076887 | 2015-09-21T07:13:22 | 2015-09-21T07:13:22 | 42,845,532 | 1 | 1 | null | 2015-09-21T07:13:22 | 2015-09-21T05:19:35 | C++ | UTF-8 | Python | false | false | 22,563 | py | #
# Copyright 2013 NTT Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mox
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.neutron import network_gateway
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
gw_template_deprecated = '''
heat_template_version: 2015-04-30
description: Template to test Network Gateway resource
resources:
NetworkGateway:
type: OS::Neutron::NetworkGateway
properties:
name: NetworkGateway
devices:
- id: e52148ca-7db9-4ec3-abe6-2c7c0ff316eb
interface_name: breth1
connections:
- network_id: 6af055d3-26f6-48dd-a597-7611d7e58d35
segmentation_type: vlan
segmentation_id: 10
'''
gw_template = '''
heat_template_version: 2015-04-30
description: Template to test Network Gateway resource
resources:
NetworkGateway:
type: OS::Neutron::NetworkGateway
properties:
name: NetworkGateway
devices:
- id: e52148ca-7db9-4ec3-abe6-2c7c0ff316eb
interface_name: breth1
connections:
- network: 6af055d3-26f6-48dd-a597-7611d7e58d35
segmentation_type: vlan
segmentation_id: 10
'''
# Canned `show_network_gateway` response body used by the mocked
# neutronclient in the tests below.
sng = {
    'network_gateway': {
        'id': 'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
        'name': 'NetworkGateway',
        'default': False,
        'tenant_id': '96ba52dc-c5c5-44c6-9a9d-d3ba1a03f77f',
        'devices': [{
            'id': 'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
            'interface_name': 'breth1'}],
        'ports': [{
            'segmentation_type': 'vlan',
            'port_id': '32acc49c-899e-44ea-8177-6f4157e12eb4',
            'segmentation_id': 10}]
    }
}
class NeutronNetworkGatewayTest(common.HeatTestCase):
    """Tests for the OS::Neutron::NetworkGateway resource.

    Every test scripts an exact sequence of neutronclient calls with mox,
    replays them, drives the resource through its lifecycle, and verifies
    the recorded expectations were consumed.
    """
    def setUp(self):
        # Stub out every neutronclient entry point the resource touches so
        # each test can record exact request/response pairs.
        super(NeutronNetworkGatewayTest, self).setUp()
        self.m.StubOutWithMock(neutronclient.Client, 'create_network_gateway')
        self.m.StubOutWithMock(neutronclient.Client, 'show_network_gateway')
        self.m.StubOutWithMock(neutronclient.Client, 'delete_network_gateway')
        self.m.StubOutWithMock(neutronclient.Client, 'connect_network_gateway')
        self.m.StubOutWithMock(neutronclient.Client, 'update_network_gateway')
        self.m.StubOutWithMock(neutronclient.Client,
                               'disconnect_network_gateway')
        self.m.StubOutWithMock(neutronclient.Client, 'list_networks')
        self.m.StubOutWithMock(neutronV20, 'find_resourceid_by_name_or_id')
    def mock_create_fail_network_not_found_delete_success(self):
        """Record a gateway create that succeeds followed by a network
        lookup that 404s, plus a clean gateway delete; return the resource.
        """
        neutronclient.Client.create_network_gateway({
            'network_gateway': {
                'name': u'NetworkGateway',
                'devices': [{'id': u'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                             'interface_name': u'breth1'}]
            }
        }
        ).AndReturn({
            'network_gateway': {
                'id': 'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'name': 'NetworkGateway',
                'default': False,
                'tenant_id': '96ba52dc-c5c5-44c6-9a9d-d3ba1a03f77f',
                'devices': [{
                    'id': 'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                    'interface_name': 'breth1'}]
            }
        }
        )
        # The network referenced by the connection cannot be resolved.
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            '6af055d3-26f6-48dd-a597-7611d7e58d35'
        ).MultipleTimes().AndRaise(qe.NeutronClientException(status_code=404))
        # mock successful to delete the network_gateway
        neutronclient.Client.delete_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37'
        ).AndReturn(None)
        neutronclient.Client.show_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37'
        ).AndRaise(qe.NeutronClientException(status_code=404))
        t = template_format.parse(gw_template)
        self.stack = utils.parse_stack(t)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        rsrc = network_gateway.NetworkGateway(
            'test_network_gateway',
            resource_defns['NetworkGateway'], self.stack)
        return rsrc
    def prepare_create_network_gateway(self, resolve_neutron=True):
        """Record a successful create + connect session and return the
        resource built from gw_template (or the deprecated template when
        resolve_neutron is False).
        """
        neutronclient.Client.create_network_gateway({
            'network_gateway': {
                'name': u'NetworkGateway',
                'devices': [{'id': u'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                             'interface_name': u'breth1'}]
            }
        }
        ).AndReturn({
            'network_gateway': {
                'id': 'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'name': 'NetworkGateway',
                'default': False,
                'tenant_id': '96ba52dc-c5c5-44c6-9a9d-d3ba1a03f77f',
                'devices': [{
                    'id': 'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                    'interface_name': 'breth1'}]
            }
        }
        )
        neutronclient.Client.connect_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 10,
                'segmentation_type': u'vlan'
            }
        ).AndReturn({
            'connection_info': {
                'network_gateway_id': u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'port_id': u'32acc49c-899e-44ea-8177-6f4157e12eb4'
            }
        })
        self.stub_NetworkConstraint_validate()
        if resolve_neutron:
            # Only the modern template needs the name -> id resolution.
            neutronV20.find_resourceid_by_name_or_id(
                mox.IsA(neutronclient.Client),
                'network',
                '6af055d3-26f6-48dd-a597-7611d7e58d35'
            ).AndReturn('6af055d3-26f6-48dd-a597-7611d7e58d35')
            t = template_format.parse(gw_template)
        else:
            t = template_format.parse(gw_template_deprecated)
        self.stack = utils.parse_stack(t)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        rsrc = network_gateway.NetworkGateway(
            'test_network_gateway',
            resource_defns['NetworkGateway'], self.stack)
        return rsrc
    def _test_network_gateway_create(self, resolve_neutron=True):
        """Drive create + delete, including a second delete after the
        gateway has already gone (404s must be swallowed).
        """
        rsrc = self.prepare_create_network_gateway(resolve_neutron)
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            '6af055d3-26f6-48dd-a597-7611d7e58d35'
        ).MultipleTimes().AndReturn(
            '6af055d3-26f6-48dd-a597-7611d7e58d35')
        neutronclient.Client.disconnect_network_gateway(
            'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 10,
                'segmentation_type': u'vlan'
            }
        ).AndReturn(None)
        neutronclient.Client.disconnect_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 10,
                'segmentation_type': u'vlan'
            }
        ).AndReturn(qe.NeutronClientException(status_code=404))
        neutronclient.Client.delete_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37'
        ).AndReturn(None)
        neutronclient.Client.show_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37'
        ).AndReturn(sng)
        neutronclient.Client.show_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37'
        ).AndRaise(qe.NeutronClientException(status_code=404))
        neutronclient.Client.delete_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37'
        ).AndRaise(qe.NeutronClientException(status_code=404))
        self.m.ReplayAll()
        rsrc.validate()
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        ref_id = rsrc.FnGetRefId()
        self.assertEqual(u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', ref_id)
        self.assertRaises(
            exception.InvalidTemplateAttribute, rsrc.FnGetAtt, 'Foo')
        self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        # Deleting again after the gateway is gone must still succeed.
        rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
    def test_network_gateway_create_deprecated(self):
        """Create via the deprecated `network_id` template form."""
        self._test_network_gateway_create(resolve_neutron=False)
    def test_network_gateway_create(self):
        """Create via the current `network` template form."""
        self._test_network_gateway_create()
    def test_network_gateway_create_fail_delete_success(self):
        # If the network_gateway was created but failed to connect to the
        # network, deleting it must still succeed and leave no residue
        # network_gateway behind.
        rsrc = self.mock_create_fail_network_not_found_delete_success()
        self.stub_NetworkConstraint_validate()
        self.m.ReplayAll()
        rsrc.validate()
        self.assertRaises(exception.ResourceFailure,
                          scheduler.TaskRunner(rsrc.create))
        self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
        ref_id = rsrc.FnGetRefId()
        self.assertEqual(u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', ref_id)
        self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
    def test_network_gateway_update(self):
        """Exercise handle_update for name, connections (twice, including a
        404 on disconnect) and devices (which forces delete + recreate).
        """
        rsrc = self.prepare_create_network_gateway()
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            '6af055d3-26f6-48dd-a597-7611d7e58d35'
        ).AndReturn('6af055d3-26f6-48dd-a597-7611d7e58d35')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            '6af055d3-26f6-48dd-a597-7611d7e58d35'
        ).AndReturn('6af055d3-26f6-48dd-a597-7611d7e58d35')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            '6af055d3-26f6-48dd-a597-7611d7e58d35'
        ).AndReturn('6af055d3-26f6-48dd-a597-7611d7e58d35')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            '6af055d3-26f6-48dd-a597-7611d7e58d35'
        ).AndReturn('6af055d3-26f6-48dd-a597-7611d7e58d35')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            '6af055d3-26f6-48dd-a597-7611d7e58d35'
        ).AndReturn('6af055d3-26f6-48dd-a597-7611d7e58d35')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            '6af055d3-26f6-48dd-a597-7611d7e58d35'
        ).AndReturn('6af055d3-26f6-48dd-a597-7611d7e58d35')
        neutronclient.Client.update_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_gateway': {
                    'name': u'NetworkGatewayUpdate'
                }
            }
        ).AndReturn(None)
        neutronclient.Client.disconnect_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 10,
                'segmentation_type': u'vlan'
            }
        ).AndReturn(None)
        neutronclient.Client.connect_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 0,
                'segmentation_type': u'flat'
            }
        ).AndReturn({
            'connection_info': {
                'network_gateway_id': u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'port_id': u'aa800972-f6be-4c65-8453-9ab31834bf80'
            }
        })
        # Second connections update: the old connection is already gone
        # (404) but the update must still proceed.
        neutronclient.Client.disconnect_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 10,
                'segmentation_type': u'vlan'
            }
        ).AndRaise(qe.NeutronClientException(status_code=404))
        neutronclient.Client.connect_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 0,
                'segmentation_type': u'flat'
            }
        ).AndReturn({
            'connection_info': {
                'network_gateway_id': u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'port_id': u'aa800972-f6be-4c65-8453-9ab31834bf80'
            }
        })
        # Devices update: disconnect, delete and recreate the gateway.
        neutronclient.Client.disconnect_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 10,
                'segmentation_type': u'vlan'
            }
        ).AndReturn(None)
        neutronclient.Client.delete_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37'
        ).AndReturn(None)
        neutronclient.Client.create_network_gateway({
            'network_gateway': {
                'name': u'NetworkGateway',
                'devices': [{'id': u'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                             'interface_name': u'breth2'}]
            }
        }
        ).AndReturn({
            'network_gateway': {
                'id': 'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'name': 'NetworkGateway',
                'default': False,
                'tenant_id': '96ba52dc-c5c5-44c6-9a9d-d3ba1a03f77f',
                'devices': [{
                    'id': 'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                    'interface_name': 'breth2'}]
            }
        }
        )
        neutronclient.Client.connect_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37', {
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_id': 10,
                'segmentation_type': u'vlan'
            }
        ).AndReturn({
            'connection_info': {
                'network_gateway_id': u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'port_id': u'aa800972-f6be-4c65-8453-9ab31834bf80'
            }
        })
        self.m.ReplayAll()
        rsrc.validate()
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        # update name
        snippet_for_update = rsrc_defn.ResourceDefinition(
            rsrc.name,
            rsrc.type(),
            {
                'name': u'NetworkGatewayUpdate',
                'devices': [{
                    'id': u'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                    'interface_name': u'breth1'}],
                'connections': [{
                    'network': '6af055d3-26f6-48dd-a597-7611d7e58d35',
                    'segmentation_type': 'vlan',
                    'segmentation_id': 10}]
            })
        prop_diff = {'name': u'NetworkGatewayUpdate'}
        self.assertIsNone(rsrc.handle_update(snippet_for_update,
                                             mox.IgnoreArg(),
                                             prop_diff))
        # update connections
        snippet_for_update = rsrc_defn.ResourceDefinition(
            rsrc.name,
            rsrc.type(),
            {
                'name': u'NetworkGateway',
                'devices': [{
                    'id': u'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                    'interface_name': u'breth1'}],
                'connections': [{
                    'network': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                    'segmentation_type': u'flat',
                    'segmentation_id': 0}]
            })
        prop_diff = {
            'connections': [{
                'network': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                'segmentation_type': u'flat',
                'segmentation_id': 0}]
        }
        self.assertIsNone(rsrc.handle_update(snippet_for_update,
                                             mox.IgnoreArg(),
                                             prop_diff))
        # update connections once more
        self.assertIsNone(rsrc.handle_update(snippet_for_update,
                                             mox.IgnoreArg(),
                                             prop_diff))
        # update devices
        snippet_for_update = rsrc_defn.ResourceDefinition(
            rsrc.name,
            rsrc.type(),
            {
                'name': u'NetworkGateway',
                'devices': [{
                    'id': u'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                    'interface_name': u'breth2'}],
                'connections': [{
                    'network_id': u'6af055d3-26f6-48dd-a597-7611d7e58d35',
                    'segmentation_type': u'vlan',
                    'segmentation_id': 10}]
            })
        prop_diff = {
            'devices': [{
                'id': u'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                'interface_name': u'breth2'}]
        }
        self.assertIsNone(rsrc.handle_update(snippet_for_update,
                                             mox.IgnoreArg(),
                                             prop_diff))
        self.m.VerifyAll()
    def test_network_gatway_create_failed(self):
        # NOTE(review): 'gatway' typo in the method name; harmless for test
        # discovery, kept to avoid churn.
        """Create fails when the client raises; delete still completes."""
        neutronclient.Client.create_network_gateway({
            'network_gateway': {
                'name': u'NetworkGateway',
                'devices': [{
                    'id': u'e52148ca-7db9-4ec3-abe6-2c7c0ff316eb',
                    'interface_name': u'breth1'}]
            }
        }
        ).AndRaise(qe.NeutronClientException)
        self.stub_NetworkConstraint_validate()
        self.m.ReplayAll()
        t = template_format.parse(gw_template)
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        rsrc = network_gateway.NetworkGateway(
            'network_gateway', resource_defns['NetworkGateway'], stack)
        error = self.assertRaises(exception.ResourceFailure,
                                  scheduler.TaskRunner(rsrc.create))
        self.assertEqual(
            'NeutronClientException: resources.network_gateway: '
            'An unknown exception occurred.',
            six.text_type(error))
        self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
        self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()
    def test_gateway_validate_failed_with_vlan(self):
        """vlan connections without segmentation_id must fail validation."""
        t = template_format.parse(gw_template)
        del t['resources']['NetworkGateway']['properties'][
            'connections'][0]['segmentation_id']
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        rsrc = network_gateway.NetworkGateway(
            'test_network_gateway',
            resource_defns['NetworkGateway'], stack)
        self.stub_NetworkConstraint_validate()
        self.m.ReplayAll()
        error = self.assertRaises(exception.StackValidationFailed,
                                  scheduler.TaskRunner(rsrc.validate))
        self.assertEqual(
            'segmentation_id must be specified for using vlan',
            six.text_type(error))
        self.m.VerifyAll()
    def test_gateway_validate_failed_with_flat(self):
        """flat connections with a non-zero segmentation_id must fail."""
        t = template_format.parse(gw_template)
        t['resources']['NetworkGateway']['properties'][
            'connections'][0]['segmentation_type'] = 'flat'
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        rsrc = network_gateway.NetworkGateway(
            'test_network_gateway',
            resource_defns['NetworkGateway'], stack)
        self.stub_NetworkConstraint_validate()
        self.m.ReplayAll()
        error = self.assertRaises(exception.StackValidationFailed,
                                  scheduler.TaskRunner(rsrc.validate))
        self.assertEqual(
            'segmentation_id cannot be specified except 0 for using flat',
            six.text_type(error))
        self.m.VerifyAll()
    def test_network_gateway_attribute(self):
        """FnGetAtt resolves known attributes and rejects unknown ones."""
        neutronclient.Client.show_network_gateway(
            u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37'
        ).MultipleTimes().AndReturn(sng)
        rsrc = self.prepare_create_network_gateway()
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual(u'ed4c03b9-8251-4c09-acc4-e59ee9e6aa37',
                         rsrc.FnGetRefId())
        self.assertEqual(False, rsrc.FnGetAtt('default'))
        error = self.assertRaises(exception.InvalidTemplateAttribute,
                                  rsrc.FnGetAtt, 'hoge')
        self.assertEqual(
            'The Referenced Attribute (test_network_gateway hoge) is '
            'incorrect.', six.text_type(error))
        self.m.VerifyAll()
| [
"kiku4@kinx.net"
] | kiku4@kinx.net |
50e4e33a9e057ace2c4b3719982e0faa15bf0597 | 863a1cda00ab2eda30a9463d69e471740ae7c515 | /models/SSD_DetNet.py | b458d2bf238bfabb08290b34efd92b1166b4bf1f | [] | no_license | sclzsx/Improved_SSD | ab9571c09c22589da61f00ecd42896ac194b3444 | bd6229a134188ab08115fa4105ec0c96f4824b0f | refs/heads/master | 2023-03-28T08:26:27.583019 | 2021-04-02T13:38:30 | 2021-04-02T13:38:30 | 354,029,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,365 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.models as models
import torch.backends.cudnn as cudnn
import os
import torch.nn.init as init
from models.modules import *
from models.FPN import DetNet_FPN
from models.backbones.DetNet import DetNet
class SSD(nn.Module):
    """Single Shot Multibox Architecture.

    The network is composed of a backbone (DetNet here) followed by an FPN
    neck and per-feature-map multibox conv heads. Each multibox layer
    branches into
        1) conv2d for class conf scores
        2) conv2d for localization predictions
    See: https://arxiv.org/pdf/1512.02325.pdf for more details.

    Args:
        phase: (string) Can be "test" or "train".
        backbone: feature extractor producing the FPN input maps.
        neck: FPN module fusing the backbone feature maps.
        head: tuple of (loc conv layers, conf conv layers).
        num_classes: number of object classes (including background).
    """
    def __init__(self, phase, backbone, neck, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        # SSD network
        self.base = backbone
        # RFB block re-weights the highest-resolution FPN feature map.
        self.Norm = BasicRFB(128, 128, stride=1, scale=1.0)
        self.fpn = neck
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """Applies network layers and ops on input image(s) x.

        Args:
            x: input image or batch of images. Shape: [batch, 3, 300, 300].

        Return:
            test phase: (loc preds [batch, num_priors, 4],
                         softmaxed conf preds [batch*num_priors, num_classes])
            train phase: (loc preds [batch, num_priors, 4],
                          raw conf preds [batch, num_priors, num_classes])
        """
        loc = list()
        conf = list()
        # Backbone -> FPN -> per-level prediction heads.
        fpn_sources = self.base(x)
        features = self.fpn(fpn_sources)
        features[0] = self.Norm(features[0])
        # apply multibox head to source layers
        for (x, l, c) in zip(features, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        if self.phase == "test":
            output = (
                loc.view(loc.size(0), -1, 4),  # loc preds
                self.softmax(conf.view(-1, self.num_classes)),  # conf preds
            )
        else:
            output = (
                loc.view(loc.size(0), -1, 4),
                conf.view(conf.size(0), -1, self.num_classes),
            )
        return output

    def load_weights(self, base_file):
        """Load a serialized state dict (.pth or .pkl) into this model."""
        other, ext = os.path.splitext(base_file)
        # BUG FIX: the original condition `ext == '.pkl' or '.pth'` was
        # always true because the non-empty string '.pth' is truthy;
        # test membership instead.
        if ext in ('.pkl', '.pth'):
            print('Loading weights into state dict...')
            self.load_state_dict(torch.load(base_file,
                                 map_location=lambda storage, loc: storage))
            print('Finished!')
        else:
            print('Sorry only .pth and .pkl files supported.')
def build_head(cfg, num_classes):
    """Build the SSD prediction heads.

    For each feature map k, one 1x1 conv predicts cfg[k]*4 box offsets and
    one predicts cfg[k]*num_classes class scores. All FPN levels here have
    128 channels.

    Generalized from six copy-pasted stanzas to a loop: configs with a
    different number of feature maps (e.g. the 7-entry '512' setting, which
    the original silently truncated to 6 heads) are now handled correctly.

    Args:
        cfg: number of anchor boxes per location, one entry per feature map.
        num_classes: number of object classes (including background).

    Returns:
        (loc_layers, conf_layers) lists of nn.Conv2d modules.
    """
    loc_layers = []
    conf_layers = []
    for num_anchors in cfg:
        loc_layers += [nn.Conv2d(128, num_anchors * 4,
                                 kernel_size=1, padding=0)]
        conf_layers += [nn.Conv2d(128, num_anchors * num_classes,
                                  kernel_size=1, padding=0)]
    return (loc_layers, conf_layers)
# Anchor-box counts per feature-map location, keyed by input image size.
mbox = {
    '300': [6, 6, 6, 6, 6, 6], # number of boxes per feature map location
    '512': [6, 6, 6, 6, 6, 4, 4],
}
def build_net(phase, size=300, num_classes=21):
    """Assemble the DetNet-backboned SSD detector.

    Args:
        phase: "test" or "train".
        size: input image size, 300 or 512.
        num_classes: number of object classes (including background).

    Returns:
        An SSD module, or None (after printing a diagnostic) for an
        unsupported phase/size — kept for backward compatibility with
        callers that check for None.
    """
    if phase != "test" and phase != "train":
        print("Error: Phase not recognized")
        return
    if size != 300 and size != 512:
        # Message fixed: this module builds SSD-DetNet, not RFBNet.
        print("Error: Sorry only SSD_DetNet300 and SSD_DetNet512 "
              "are supported!")
        return
    backbone = DetNet(num_classes)
    neck = DetNet_FPN([128, 256, 256, 256, 256, 256])
    head = build_head(mbox[str(size)], num_classes)
    return SSD(phase, backbone, neck, head, num_classes)
if __name__ == "__main__":
net = build_net('test', num_classes=5)
# print(net)
# print(x.shape)
from ptflops import get_model_complexity_info
img_dim = 300
flops, params = get_model_complexity_info(net, (img_dim, img_dim), as_strings=True, print_per_layer_stat=True)
print('Flops: ' + flops)
print('Params: ' + params)
# def hook(self, input, output):
# print(output.data.cpu().numpy().shape)
#
# for m in net.modules():
# if isinstance(m, nn.Conv2d):
# m.register_forward_hook(hook)
net = net.cuda()
import time
with torch.no_grad():
x = torch.randn(1, 3, 300, 300).cuda()
s = time.clock()
y = net(x)
print(type(y), 1 / (time.clock() - s))
| [
"1044068981@qq.com"
] | 1044068981@qq.com |
948c0c259fac695a9fbfea3b1168e90cb825530f | c0d9354f89ae9a337572318b5524856a412e1d02 | /graph/127.word-ladder.py | 0f21841b5c4f2b86cc708d9426433e7f133a6970 | [] | no_license | caitaozhan/LeetCode | 5bab0ce09e8d484712247c01fcd0186997940000 | abb19fa2859634f5260d439812525bb14399ae55 | refs/heads/master | 2023-02-07T18:35:34.769990 | 2023-01-30T18:01:20 | 2023-01-30T18:01:20 | 139,379,363 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,748 | py | #
# @lc app=leetcode id=127 lang=python3
#
# [127] Word Ladder
#
# @lc code=start
# import line_profiler
import time
from typing import List
from collections import defaultdict
class SolutionTLE:
    """Baseline O(n^2 * m) approach kept for reference.

    Builds an explicit adjacency graph by comparing every pair of words,
    then BFS from endWord. Graph construction dominates and TLEs on large
    inputs. Debug prints/timing removed so the method is side-effect free.
    """

    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
        """Return the length of the shortest transformation sequence
        from beginWord to endWord, or 0 if no such sequence exists.
        """
        if (not wordList) or (endWord not in wordList):
            return 0
        g = defaultdict(list)
        end = 0  # index of endWord; BFS starts there
        size = len(wordList)
        for i in range(size):
            if wordList[i] == endWord:
                end = i
            for j in range(i + 1, size):
                if self.compare(wordList[i], wordList[j]) == 1:
                    g[i].append(j)
                    g[j].append(i)
        dist = [0] * size
        visited = [False] * size
        self.bfs(g, end, dist, visited)
        # beginWord is not a graph node: the answer is the shortest
        # distance to any reachable word one edit away from beginWord,
        # plus the two endpoints themselves.
        min_dist = float('inf')
        for i in range(size):
            if self.compare(beginWord, wordList[i]) == 1 and visited[i]:
                min_dist = min(min_dist, dist[i])
        return min_dist + 2 if min_dist != float('inf') else 0

    def compare(self, word1, word2):
        """Count differing positions of two equal-length words,
        short-circuiting once two differences are found.
        """
        count = 0
        for a, b in zip(word1, word2):
            if a != b:
                count += 1
            if count == 2:
                break
        return count

    def bfs(self, g, start, dist, visited):
        """Layered BFS from start, filling dist (hop counts) and visited."""
        queue = [start]
        visited[start] = True
        layer = 0
        while queue:
            new_queue = []
            for cur in queue:
                for nxt in g[cur]:
                    if visited[nxt] is False:
                        new_queue.append(nxt)
                        dist[nxt] = layer + 1
                        visited[nxt] = True
            queue = new_queue
            layer += 1
class Solution1:
    """O(n * m^2) approach using wildcard intermediate nodes.

    Instead of comparing all O(n^2) word pairs, each word is linked to its
    m wildcard patterns (e.g. "h*t"), so neighbors are discovered through
    shared patterns; m = word length << n. BFS runs from endWord towards
    beginWord.
    """

    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
        """Return the shortest ladder length, or 0 when no ladder exists.

        BUG FIX: the original mutated the caller's wordList via
        wordList.append(beginWord); a copy is used instead.
        """
        if not wordList or endWord not in wordList:
            return 0
        words = wordList + [beginWord]  # copy — do not mutate the argument
        graph = defaultdict(list)       # wildcard pattern -> matching words
        for word in words:
            for i in range(len(beginWord)):
                graph[self.get_intermediate(word, i)].append(word)
        return self.bfs(graph, endWord, beginWord, set(words))

    def bfs(self, graph, start, end, not_visited):
        """Layered BFS over the pattern graph; returns the ladder length
        (hops + 1) when end is reached, else 0.
        """
        queue = [start]
        not_visited.discard(start)
        layer = 1
        while queue:
            new_queue = []
            for cur in queue:
                for i in range(len(cur)):
                    intermediate = self.get_intermediate(cur, i)
                    for nxt in graph[intermediate]:
                        if nxt == end:
                            return layer + 1
                        if nxt in not_visited:
                            new_queue.append(nxt)
                            not_visited.remove(nxt)
            queue = new_queue
            layer += 1
        return 0

    def get_intermediate(self, word, i):
        """Return word with position i replaced by the '*' wildcard."""
        return word[:i] + '*' + word[i+1:]
class Solution:
    """Explicit word-to-word graph built via wildcard template grouping.

    Uses more memory than Solution1 (one extra graph), but the BFS then
    touches only real words, not O(n*m) intermediate nodes. Dead
    commented-out timing code and the redundant `visited` set (its keys
    duplicated `dist`) were removed.
    """

    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
        """Return the shortest ladder length, or 0 when no ladder exists."""
        # step 1: group words by wildcard template, then connect every
        # pair inside a group (two words differing in exactly one
        # position share exactly one template, so no duplicate edges).
        template_dict = defaultdict(list)
        for word in wordList:
            for i in range(len(word)):
                template_dict[word[:i] + '*' + word[i+1:]].append(word)
        g = defaultdict(list)
        for val in template_dict.values():
            for i in range(len(val)):
                for j in range(i + 1, len(val)):
                    g[val[i]].append(val[j])
                    g[val[j]].append(val[i])
        # step 2: layered BFS from endWord; dist doubles as the visited set.
        dist = {endWord: 0}
        queue = [endWord]
        level = 0
        while queue:
            new_queue = []
            for cur in queue:
                for nxt in g[cur]:
                    if nxt not in dist:
                        dist[nxt] = level + 1
                        new_queue.append(nxt)
            queue = new_queue
            level += 1
        # step 3: beginWord is not a graph node; the answer is the closest
        # reachable neighbor's distance plus the two endpoints.
        minn = float('inf')
        for i in range(len(beginWord)):
            template = beginWord[:i] + '*' + beginWord[i+1:]
            for node in template_dict[template]:
                if node in dist:
                    minn = min(minn, dist[node])
        return 0 if minn == float('inf') else minn + 2
start = "charge"
end = "comedo"
wordList = ["shanny","shinny","whinny","whiney","shiver","sharer","scarer","scaler","render","fluxes","teases","starks","clinks","messrs","crewed","donner","blurts","bettye","powell","castes","hackee","hackle","heckle","deckle","decile","defile","define","refine","repine","rapine","ravine","raving","roving","chased","roping","coping","coming","homing","pointy","hominy","homily","homely","comely","comedy","comedo","vagues","crocus","spiked","bobbed","dourer","smells","feared","wooden","stings","loafer","pleads","gaiter","meeter","denser","bather","deaves","wetted","pleats","cadger","curbed","grover","hinged","budget","gables","larked","flunks","fibbed","bricks","bowell","yonder","grimes","clewed","triads","legion","lacier","ridden","bogied","camper","damien","spokes","flecks","goosed","snorer","choked","choler","leakey","vagued","flumes","scanty","bugger","tablet","nilled","julies","roomed","ridges","snared","singes","slicks","toiled","verged","shitty","clicks","farmed","stunts","dowsed","brisks","skunks","linens","hammer","naiver","duster","elates","kooked","whacky","mather","loomed","soured","mosses","keeled","drains","drafty","cricks","glower","brayed","jester","mender","burros","arises","barker","father","creaks","prayed","bulges","heaped","called","volley","girted","forded","huffed","bergen","grated","douses","jagger","grovel","lashes","creeds","bonier","snacks","powder","curled","milker","posers","ribbed","tracts","stoked","russel","bummer","cusses","gouged","nailed","lobbed","novels","stands","caches","swanks","jutted","zinged","wigged","lunges","divers","cranny","pinter","guides","tigers","traces","berber","purges","hoaxer","either","bribed","camped","funked","creaky","noises","paused","splits","morrow","faults","ladies","dinged","smoker","calved","deters","kicker","wisher","ballad","filled","fobbed","tucker","steams","rubber","staled","chived","warred","draped","curfew","chafed","washer","tombed","basket","limned","rapped","swills","gashed","loaner","settee"
,"layers","bootee","rioted","prance","sharps","wigner","ranted","hanker","leaden","groped","dalian","robbed","peeled","larder","spoofs","pushed","hallie","maiden","waller","pashas","grains","pinked","lodged","zipper","sneers","bootie","drives","former","deepen","carboy","snouts","fained","wilmer","trance","bugles","chimps","deeper","bolder","cupped","mauser","pagers","proven","teaser","plucky","curved","shoots","barged","mantes","reefer","coater","clotho","wanner","likens","swamis","troyes","breton","fences","pastas","quirky","boiler","canoes","looted","caries","stride","adorns","dwells","hatred","cloths","rotted","spooks","canyon","lances","denied","beefed","diaper","wiener","rifled","leader","ousted","sprays","ridged","mousey","darken","guiled","gasses","suited","drools","bloody","murals","lassie","babied","fitter","lessee","chiles","wrongs","malian","leaves","redder","funnel","broths","gushes","grants","doyens","simmer","locked","spoors","berger","landed","mosley","scorns","whiten","hurled","routed","careen","chorus","chasms","hopped","cadged","kicked","slewed","shrewd","mauled","saucer","jested","shriek","giblet","gnarls","foaled","roughs","copses","sacked","blends","slurps","cashew","grades","cramps","radius","tamped","truths","cleans","creams","manner","crimps","hauled","cheery","shells","asters","scalps","quotas","clears","clover","weeder","homers","pelted","hugged","marked","moaned","steely","jagged","glades","goshes","masked","ringer","eloped","vortex","gender","spotty","harken","hasten","smiths","mulled","specks","smiles","vainer","patted","harden","nicked","dooley","begged","belief","bushel","rivers","sealed","neuter","legged","garter","freaks","server","crimea","tossed","wilted","cheers","slides","cowley","snotty","willed","bowled","tortes","pranks","yelped","slaved","silver","swords","miners","fairer","trills","salted","copsed","crusts","hogged","seemed","revert","gusted","pixies","tamika","franks","crowed","rocked","fisher","sheers","pushes","drifts","
scouts","sables","sallie","shiner","coupes","napped","drowse","traced","scenes","brakes","steele","beater","buries","turned","luther","bowers","lofted","blazer","serves","cagney","hansel","talker","warmed","flirts","braced","yukked","milken","forged","dodder","strafe","blurbs","snorts","jetted","picket","pistil","valved","pewter","crawls","strews","railed","clunks","smiled","dealer","cussed","hocked","spited","cowers","strobe","donned","brawls","minxes","philby","gavels","renter","losses","packet","defied","hazier","twines","balled","gaoled","esther","narrow","soused","crispy","souped","corned","cooley","rioter","talley","keaton","rocker","spades","billie","mattel","billet","horton","navels","sander","stoker","winded","wilder","cloyed","blazed","itched","docked","greene","boozed","ticket","temped","capons","bravos","rinded","brandi","massed","sobbed","shapes","yippee","script","lesion","mallet","seabed","medals","series","phases","grower","vertex","dented","tushed","barron","toffee","bushes","mouser","zenger","quaked","marley","surfed","harmed","mormon","flints","shamed","forgot","jailor","boater","sparer","shards","master","pistol","tooted","banned","drover","spices","gobbed","corals","chucks","kitten","whales","nickel","scrape","hosted","hences","morays","stomps","marcel","hummed","wonder","stoves","distil","coffer","quaker","curler","nurses","cabbed","jigger","grails","manges","larger","zipped","rovers","stints","nudges","marlin","exuded","storey","pester","longer","creeps","meaner","wallop","dewier","rivera","drones","valued","bugled","swards","cortes","charts","benson","wreaks","glares","levels","smithy","slater","suites","paired","fetter","rutted","levied","menses","wither","woolly","weeded","planed","censer","tested","pulled","hitter","slicer","tartar","chunky","whirrs","mewled","astern","walden","hilton","cached","geller","dolled","chores","sorter","soothe","reused","clumps","fueled","hurler","helled","packed","ripped","tanned","binder","flames","teased","pu
nker","jerked","cannon","joists","whited","sagged","heaven","hansen","grayer","turfed","cranks","stater","bunted","horsey","shakes","brands","faints","barber","gorged","creamy","mowers","scrams","gashes","knacks","aeries","sticks","altars","hostel","pumped","reeves","litter","hoaxed","mushed","guided","ripper","bought","gelled","ranker","jennie","blares","saloon","bomber","mollie","scoops","coolie","hollis","shrunk","tattle","sensed","gasket","dodoes","mapped","strips","dodges","sailed","talked","sorted","lodges","livest","pastel","ladles","graded","thrice","thales","sagger","mellon","ganged","maroon","fluked","raised","nannie","dearer","lither","triked","dorset","clamps","lonnie","spates","larded","condor","sinker","narced","quaver","atones","farted","elopes","winger","mottle","loaned","smears","joanne","boozes","waster","digger","swoops","smokey","nation","drivel","ceased","miffed","faiths","pisses","frames","fooled","milled","dither","crazed","darryl","mulder","posses","sumter","weasel","pedals","brawny","charge","welted","spanks","sallow","joined","shaker","blocks","mattie","swirls","driver","belles","chomps","blower","roared","ratted","hailed","taunts","steamy","parrot","deafer","chewed","spaces","cuffed","molded","winked","runnel","hollow","fluted","bedded","crepes","stakes","vested","parley","burton","loiter","massey","carnap","closed","bailed","milder","heists","morale","putter","snyder","damion","conned","little","pooped","ticced","cocked","halves","wishes","francs","goblet","carlin","pecked","julius","raster","shocks","dawned","loosen","swears","buried","peters","treats","noshed","hedges","trumps","rabies","ronnie","forces","ticked","bodies","proved","dadoes","halved","warner","divest","thumbs","fettle","ponies","testis","ranked","clouts","slates","tauted","stools","dodged","chancy","trawls","things","sorrow","levies","glides","battle","sauced","doomed","seller","strove","ballet","bumper","gooses","foiled","plowed","glints","chanel","petals","darted","sear
ed","trunks","hatter","yokels","vanned","tweedy","rubles","crones","nettie","roofed","dusted","dicker","fakers","rusted","bedder","darrin","bigger","baylor","crocks","niches","tented","cashed","splats","quoted","soloed","tessie","stiles","bearer","hissed","soiled","adored","bowery","snakes","wagers","rafter","crests","plaids","cordon","listed","lawson","scared","brazos","horded","greens","marred","mushes","hooper","halter","ration","calked","erodes","plumed","mummer","pinged","curios","slated","ranter","pillow","frills","whaled","bathos","madden","totted","reamed","bellow","golfer","seaman","barred","merger","hipped","silken","hastes","strays","slinks","hooted","convex","singed","leased","bummed","leaner","molted","naught","caters","tidied","forges","sealer","gulled","plumps","racket","fitted","rafted","drapes","nasser","tamara","winced","juliet","ledger","bettie","howell","reeved","spiced","thebes","apices","dorsey","welled","feeler","warded","reader","folded","lepers","cranky","bosses","ledges","player","yellow","lunged","mattes","confer","malign","shared","brandy","filmed","rhinos","pulsed","rouses","stones","mixers","cooped","joiner","papped","liston","capote","salvos","wicker","ciders","hoofed","wefted","locket","picker","nougat","limpid","hooter","jailer","peaces","mashes","custer","wallis","purees","trends","irater","honied","wavers","tanner","change","hinges","tatted","cookie","catnap","carton","crimed","betted","veined","surges","rumped","merlin","convey","placid","harped","dianna","hookey","nobles","carted","elided","whined","glover","bleats","stales","husker","hearer","tartan","weaker","skewer","lumbar","temper","gigged","gawked","mayors","pigged","gather","valves","mitten","largos","boreas","judges","cozens","censor","frilly","dumbed","downer","jogger","scolds","danced","floras","funded","lumped","dashes","azores","quites","chunks","washed","duller","bilges","cruels","brooks","fishes","smoked","leaped","hotter","trials","heaves","rouges","kissed","sleety
","manses","spites","starts","banded","clings","titted","vetoed","mister","mildew","wailed","sheets","peeked","passer","felted","broken","lieges","ruffed","bracts","buster","muffed","lanker","breaks","coffey","sighed","charms","balded","kisser","booths","leaven","cheeps","billed","lauder","bumped","career","stocks","airier","limped","jeanie","roamed","carves","lilted","router","bonnie","denver","briggs","steeps","nerves","oinked","bucked","hooves","dancer","burris","parked","swells","collie","perked","cooler","fopped","wedder","malted","sabers","lidded","conner","rogues","fought","dapper","purled","crowds","barnes","bonner","globed","goners","yankee","probes","trains","sayers","jersey","valley","vatted","tauter","dulled","mucked","jotted","border","genres","banked","filter","hitler","dipper","dollie","sieves","joliet","tilted","checks","sports","soughs","ported","causes","gelded","mooter","grills","parred","tipped","placer","slayer","glided","basked","rinses","tamper","bunged","nabbed","climbs","faeces","hanson","brainy","wicket","crowns","calmed","tarred","spires","deanne","gravel","messes","snides","tugged","denier","moslem","erased","mutter","blahed","hunker","fasten","garbed","cracks","braked","rasped","ravens","mutton","tester","tories","pinker","titled","arisen","softer","woolen","disses","likest","dicier","nagged","lipton","plumbs","manged","faulty","sacred","whiter","erases","padres","haired","captor","metals","cardin","yowled","trusts","revels","boxers","toured","spouts","sodded","judged","holley","figged","pricey","lapses","harper","beaned","sewers","caused","willie","farmer","pissed","bevies","bolled","bugler","votive","person","linton","senses","supped","mashed","pincer","wetter","tangos","sticky","lodger","loader","daunts","peaked","moused","sleeps","lasted","tasked","awards","lovely","gushed","spurts","canter","mantis","coaled","groans","dannie","oopses","sneaky","vogues","mobile","plumes","chides","theses","marcia","parser","flexed","stayed","fouler",
"tusked","quartz","daubed","clancy","rouged","flaked","norton","dunner","corded","shelly","hester","fucker","polled","rodger","yeager","zinced","livens","browne","gonged","pubbed","sapped","thrive","placed","jensen","moises","scopes","stumpy","stocky","heller","levers","morals","wheres","gasped","jobber","leaved","champs","rosier","pallet","shooed","parses","bender","closet","pureed","routes","verges","bulled","foster","rummer","molten","condos","better","cotter","lassos","grafts","vendor","thrace","codded","tinker","bullet","beaker","garden","spiels","popper","skills","plated","farrow","flexes","esters","brains","handel","puller","dickey","creeks","ballot","singer","sicker","spayed","spoils","rubier","missed","framed","bonnet","molder","mugger","waived","taster","robles","tracks","nearer","lister","horsed","drakes","lopped","lubber","busied","button","eluded","ceases","sought","realer","lasers","pollen","crisps","binned","darrel","crafty","gleams","lonely","gordon","harley","damian","whiles","wilton","lesser","mallow","kenyon","wimped","scened","risked","hunter","rooter","ramses","inches","goaded","ferber","freaky","nerved","spoken","lovers","letter","marrow","bulbed","braver","sloped","breads","cannes","bassos","orated","clever","darren","bredes","gouger","servos","trites","troths","flunky","jammed","bugged","watter","motive","humped","writer","pestle","rilled","packer","foists","croats","floury","napier","floors","scotty","sevens","harrow","welter","quacks","daybed","lorded","pulses","pokier","fatten","midges","joints","snoopy","looter","monies","canted","riffed","misses","bunker","piston","yessed","earner","hawked","wedged","brewer","nested","graver","hoaxes","slaves","pricks","magpie","bernie","rapier","roster","poohed","corner","trysts","rogers","whirls","bathed","teasel","opener","minced","sister","dreamy","worker","rinked","panted","triton","mervin","snowed","leafed","thinks","lesson","millet","larson","lagged","likely","stormy","fortes","hordes","wovens","k
inked","mettle","seated","shirts","solver","giants","jilted","leaded","mendez","lowers","bidder","greats","pepped","flours","versus","canton","weller","cowper","tapped","dueled","mussed","rubies","bonged","steals","formed","smalls","sculls","docket","ouster","gunned","thumps","curred","withes","putted","buttes","bloats","parsed","galley","preses","tagged","hanger","planes","chords","shafts","carson","posits","zinger","solves","tensed","tastes","rinsed","kenned","bitten","leslie","chanty","candor","daises","baggie","wedded","paints","moored","haloed","hornet","lifted","fender","guiles","swifts","flicks","lancer","spares","pellet","passed","finked","joanna","bidden","swamps","lapped","leered","served","shirrs","choker","limper","marker","nudged","triter","thanks","peered","bruins","loaves","fabled","lathes","pipers","hooped","orates","burned","swines","sprats","warder","colder","crazes","reined","prized","majors","darrow","waifed","rooked","rickey","patter","shrive","gropes","gassed","throve","region","weaken","hettie","walton","galled","convoy","wesson","exudes","tinted","clanks","blinks","slacks","stilts","franny","socket","wished","kidded","knotty","turves","cashes","geared","sunned","glowed","sadden","harlem","testes","sweets","becket","blazes","batter","fellow","clovis","copier","shaped","husked","gimlet","rooney","taints","sashes","bossed","cootie","franck","probed","bagged","smocks","batten","spared","chills","relics","meyers","grader","tromps","dimmer","pasted","pepper","capped","played","junket","easier","palmed","pander","vaguer","bulged","dissed","borges","raises","wallow","jigged","bogged","burped","neater","rammed","fibers","castor","skirts","cancer","tilled","spored","dander","denims","budges","trucks","sowers","yapped","cadges","wrists","hacker","graved","vipers","noshes","minted","lessor","cassia","wrecks","hidden","brando","honeys","chilli","ragged","breded","punier","stacey","sisses","jocked","croaks","dinned","walker","heston","flares","coined","can
not","chocks","leases","wander","balder","warmer","bawled","donnie","damson","header","chilly","models","simper","watery","milked","poises","combed","toilet","gallop","sonnet","loosed","yawned","splays","pauses","bother","graphs","shrews","scones","manuel","milers","hotels","bennie","flores","spells","grimed","tenses","staged","puffer","posies","motion","fudged","fainer","tatter","seraph","nansen","months","muppet","tamera","shaman","falser","becker","lisbon","clefts","weeper","mendel","girder","takers","torsos","forked","dances","stated","yelled","scants","frothy","rolled","yodels","listen","craned","brooms","suffer","easter","shills","craves","bleeps","belled","dished","bordon","zither","jacket","lammer","kirked","shaved","atoned","frumpy","nosier","vender","graced","clingy","chants","wrests","cursed","prunes","tarter","stripe","coffee","veiled","tweeds","shrine","spines","kegged","melvin","gasser","market","marten","peeped","sanger","somber","spider","netted","radium","slings","scarfs","mended","creels","shaves","payers","bunked","movers","beings","conked","cozies","benton","codger","prints","gusset","longed","burner","jambed","mullet","fogged","scores","carbon","sleeks","helped","waxier","gilded","harlot","winces","tenser","lowell","ramsey","kennan","booted","beaver","rested","shouts","hickey","looped","swings","wonted","dilled","defers","lolled","pupped","cruets","solved","romper","defter","chokes","kithed","garnet","bookie","stared","stares","latter","lazies","fanned","wagged","dunces","corked","cloned","prided","baxter","pusses","boomed","masses","warren","weaves","delves","handed","merton","lusher","hepper","gibber","sender","parsec","snares","masher","seamed","sweats","welles","gagged","curter","mother","beeped","vealed","shoved","slaver","hacked","gutted","ranged","bashed","closer","storks","meshed","cortex","copper","severn","gripes","carlos","scares","crates","boiled","ginned","mouses","raided","greyed","verier","slopes","fenced","sniper","priced","flawe
d","buffed","spacey","favors","platen","miller","walled","cutter","skated","holier","beamed","waiter","drowns","clomps","quarks","bested","frisks","purged","scalds","marian","flower","howled","plover","bikers","trails","hagged","smirks","sitter","carmen","lanced","plants","nobler","yakked","thesis","lassen","margin","wagner","sifter","houses","screws","booker","dormer","meters","padded","loaded","cartel","sutton","willis","chatty","dunked","dreamt","dalton","fables","coveys","muller","shanty","adders","tailor","helper","liters","butted","maiman","hollie","gallon","xavier","shrank","mickey","rather","powers","keened","doused","kisses","flanks","dotted","phased","dumped","linger","kramer","spaced","soften","strife","rowers","hovers","crimes","crooks","carrel","braces","lander","shrove","skulks","banker","itches","dropsy","misted","pulped","cloche","fawned","states","teared","beeper","raider","groves","livery","aerier","keenan","severe","sabres","bogies","coated","harlow","tanked","mellow","cozier","shanks","spooky","blamed","tricks","sleets","punted","jumped","caxton","warped","halley","frisky","shines","skater","lumber","truces","sliced","gibbet","narked","chives","graves","gummed","holler","glazes","nieves","hushed","nought","prated","chored","cloudy","kidder","huston","straws","twined","gifted","rodney","haloes","france","wirier","mercia","rubbed","coaxed","sumner","snipes","nipper","leiden","madman","margie","footed","firmed","budded","froths","senior","hoover","tailed","glider","straps","stalks","billow","racked","javier","zoomed","shades","whores","braids","roused","sudden","dogies","fencer","snaked","flings","traded","gunner","snider","staten","levees","lathed","sailor","waited","muster","clothe","lulled","cargos","revved","sooths","flamed","borers","feller","bladed","oliver","collin","wusses","murder","parted","jailed","frayed","doored","cheeks","misled","belted","winter","merges","shaven","fudges","tabbed","forget","sloths","cachet","mealed","sassed","salter"
,"haunts","ranger","rivets","deeded","reaped","damped","crated","youths","whacks","tamers","misery","seeped","eerier","tiller","busses","gloved","hushes","cronus","pruned","casket","direst","guilds","motley","spools","fevers","snores","greece","elides","waists","rattle","trader","juster","rashes","stoney","pipped","solder","sinner","prides","rugged","steers","gnarly","titter","cities","walter","stolen","steaks","hawker","weaned","jobbed","jacked","pikers","hipper","spoilt","beeves","craved","gotten","balked","sherry","looney","crisis","callie","swiped","fished","rooted","bopped","bowler","escher","chumps","jerrod","lefter","snooty","fillet","scales","comets","lisped","decked","clowns","horned","robber","bottle","reeled","crapes","banter","martel","dowels","brandt","sweeps","heeled","tabled","manors","danger","dionne","prayer","decker","millie","boated","damned","horses","globes","failed","lammed","nigher","joyner","sobers","chided","tipper","parcel","flakes","fugger","elated","hinder","hopper","crafts","wipers","badder","jessie","matted","wafted","pealed","cheats","elites","torres","bushed","sneaks","tidies","brings","stalls","payees","zonked","danker","poshes","smelts","stoops","warden","chicks","ramsay","budged","firmer","glazed","heated","slices","hovels","belied","shifts","pauper","tinges","weston","casted","titles","droves","roomer","modals","seamen","wearer","blonde","berlin","libbed","tensor","hokier","lambed","graped","headed","copped","eroses","fagged","filler","keener","stages","civets","spills","tithed","sullen","sucked","briton","whaler","hooded","tittle","bucket","furled","darned","planet","clucks","batted","dagger","brides","severs","pathos","grainy","relied","carpel","makers","lancet","slowed","messed","ravels","faster","gabbed","chance","grayed","santos","spends","chinos","saints","swirly","dories","wilson","milton","clangs","manual","nodded","signer","stript","etched","vaster","wastes","stored","minces","purred","marvin","pinned","skulls","heaved","
wadded","fowled","hashed","mullen","relief","hatted","primed","chaffs","canned","lackey","showed","shandy","chases","maggie","deafen","bussed","differ","worked","marted","ducked","socked","fussed","greyer","herder","trusty","follow","samson","babies","whorls","stanks","manson","cranes","murrow","shrink","genius","holder","lenses","yucked","termed","ruined","junker","belies","joshed","cooled","basted","greeks","fuller","healer","carver","havens","drunks","sucker","lotion","glared","healed","pocked","rifles","weaved","canoed","punter","hinton","settle","boobed","hinted","scored","harder","status","sloven","hayden","golfed","scoots","bloods","slaked","jugged","louses","cassie","shaded","rushed","pitied","barked","honked","rasher","forced","shaver","vowels","holden","pelvis","blades","chests","preyer","floods","deanna","cation","mapper","falter","dabbed","mocker","nestle","shucks","heeded","ticker","binges","summer","slumps","lusted","scampi","crofts","gorges","pardon","torses","smokes","lashed","bailey","jabbed","calmer","preset","forbes","hasted","wormed","winged","minors","banner","grazed","hewers","kernel","jolted","sniped","clunky","ratios","blinds","ganges","misers","spikes","riders","hallow","grumpy","barren","summed","infers","places","jarred","killer","plaint","goofed","subbed","prudes","sipped","kookie","whines","droopy","palled","cherry","proves","mobbed","spaded","cheese","pluses","bathes","motels","spewed","soaked","howler","puffed","malled","shrike","slided","fulled","pouted","shames","lessen","ringed","teemed","grands","linked","wooten","feuded","deaden","scents","flutes","salton"]
# Benchmark the three implementations on the same (start, end, wordList)
# instance, timing each ladderLength call with wall-clock time.
s = Solution()
print(len(wordList))
a = time.time()
print(s.ladderLength(start, end, wordList))
print(time.time() - a)
s = Solution1()
print(len(wordList))
a = time.time()
print(s.ladderLength(start, end, wordList))
print(time.time() - a)
s = SolutionTLE()
print(len(wordList))
a = time.time()
print(s.ladderLength(start, end, wordList))
print(time.time() - a)
# @lc code=end
| [
"caitaozhan@gmail.com"
] | caitaozhan@gmail.com |
43b21f98599d04e5232fe8734a9a9ac30960cbdc | 671ec3242f1d83846560ccf34ea9a924c6e37354 | /pandastabletooltip/main.py | 4a87276cf93fc0409da350ba10dbec02bdf9b0aa | [
"MIT"
] | permissive | simon-ritchie/pandas-table-tooltip | a4590731d57d73ac954cf221d9a2af9fe3ed6aef | cf85501f0502c4d7c7b67dfdbe592c69f5be59c8 | refs/heads/master | 2020-06-24T15:30:20.024367 | 2019-07-27T03:48:16 | 2019-07-27T03:48:16 | 199,001,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | """A module that handles tooltip display.
"""
import pandas as pd
from IPython.display import HTML, display
def make_table_html_with_tooltip(df, limit=3000):
    """
    Make a table with tooltips.

    Each cell carries a ``title`` attribute of the form
    ``"<index>, <column>"`` so hovering shows the cell's coordinates.

    Parameters
    ----------
    df : DataFrame
        DataFrame to be displayed.
    limit : int, default 3000
        Display limit number.

    Raises
    ------
    ValueError
        If the number of DataFrame rows exceeds the display
        limit number.

    Returns
    -------
    html : HTML
        Result HTML object.
    """
    # Local import keeps the module-level import surface unchanged.
    from html import escape

    if len(df) > limit:
        # Fixed "dispaly" typo in the user-facing message.
        err_msg = 'The number of DataFrame rows exceeds the display limit '\
            '(currently limited {limit_num}). '\
            'Please adjust the `limit` argument.'.format(
                limit_num=limit)
        raise ValueError(err_msg)
    html_str = '<table border="1" class="dataframe">'
    html_str += '\n  <thead>'
    # Fixed typo: was "text-aligh", which browsers silently ignored.
    html_str += '\n    <tr style="text-align: right;">'
    html_str += '\n      <th></th>'
    for column in df.columns:
        html_str += '\n      <th>{column}</th>'.format(
            column=escape(str(column)))
    html_str += '\n    </tr>'
    html_str += '\n  </thead>'
    # One <tbody> wrapping all rows (the original opened a new <tbody>
    # for every single row).
    html_str += '\n  <tbody>'
    for index_val, sr in df.iterrows():
        html_str += '\n    <tr>'
        html_str += '\n      <th>{index_val}</th>'.format(
            index_val=escape(str(index_val)))
        # Series.iteritems() was removed in pandas 2.0; .items() is the
        # long-standing equivalent.
        for column_val, value in sr.items():
            tooltip = '{index_val}, {column_val}'.format(
                index_val=index_val,
                column_val=column_val)
            # Escape cell text and the title attribute so data containing
            # '<', '&' or quotes cannot break (or inject into) the markup.
            html_str += \
                '\n      <td title="{tooltip}">'\
                '{value}</td>'.format(
                    tooltip=escape(str(tooltip)),
                    value=escape(str(value)))
        html_str += '\n    </tr>'
    html_str += '\n  </tbody>'
    html_str += '\n</table>'
    return HTML(html_str)
| [
"antisocial.sid2@gmail.com"
] | antisocial.sid2@gmail.com |
a9aa83125c49314aac7eec6297fd67dfd86331f6 | 4c7914bf0eb52f2fe5dab70fa630a322a9449e05 | /learnOpencv/venv/Scripts/easy_install-3.6-script.py | a1f29dc75bf1e2af2366ebf9c88f94bdff507b63 | [] | no_license | xhongc/pythonCrawl | f334d737326a47782d2533c4db23734729f13099 | a38e59496dd78b6e070ea6882043b1744190103e | refs/heads/master | 2022-12-10T01:22:01.608193 | 2020-01-12T09:43:19 | 2020-01-12T09:43:22 | 93,115,695 | 4 | 5 | null | 2022-11-22T02:36:28 | 2017-06-02T01:47:22 | Python | UTF-8 | Python | false | false | 464 | py | #!C:\Users\xhongc\work\pythonCrawl\learnOpencv\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"408737515@qq.com"
] | 408737515@qq.com |
b42555da9cd6e0002fc0b67b79a21e51526bd952 | f1cdcfe600aa10c871486c2cf5a91f23a00b5e81 | /ch5/dorm.py | 9db7ace7cf0d6f604f3e16dec60cb2c322507726 | [] | no_license | mccarvik/collective_intelligence | 58268c4f5bcf38466951e3ddf96aba1ad05aaa7e | 9bf448eea62fa59e2ec97fdca0cafeb1d4ce5c50 | refs/heads/master | 2021-09-04T22:38:13.581625 | 2018-01-22T20:00:04 | 2018-01-22T20:00:04 | 106,424,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | import random, math, pdb, time
import optimization
# The dorms, each of which has two available spaces
dorms=['Zeus','Athena','Hercules','Bacchus','Pluto']
# People, along with their first and second choices
prefs=[('Toby', ('Bacchus', 'Hercules')),
('Steve', ('Zeus', 'Pluto')),
('Karen', ('Athena', 'Zeus')),
('Sarah', ('Zeus', 'Pluto')),
('Dave', ('Athena', 'Bacchus')),
('Jeff', ('Hercules', 'Pluto')),
('Fred', ('Pluto', 'Athena')),
('Suzie', ('Bacchus', 'Hercules')),
('Laura', ('Bacchus', 'Hercules')),
('James', ('Hercules', 'Athena'))]
# [(0,9),(0,8),(0,7),(0,6),...,(0,0)]
domain=[(0,(len(dorms)*2)-i-1) for i in range(0,len(dorms)*2)]
def printsolution(vec):
    """Print each student's assigned dorm for solution vector *vec*.

    Each entry of *vec* indexes into the slots that REMAIN after earlier
    students were placed, so the same index can mean different dorms.

    NOTE(review): uses the Python 2 ``print`` statement, so this function
    only runs under Python 2.
    """
    slots=[]
    # Create two slots for each dorm
    for i in range(len(dorms)): slots+=[i,i]
    # Loop over each students assignment
    for i in range(len(vec)):
        x=int(vec[i])
        # Choose the slot from the remaining ones
        dorm=dorms[slots[x]]
        # Show the student and assigned dorm
        print prefs[i][0],dorm
        # Remove this slot
        del slots[x]
def dormcost(vec):
    """Cost of a dorm-assignment vector.

    Each student costs 0 for their first choice, 1 for their second and
    3 for any other dorm.  As in printsolution(), each entry of *vec*
    indexes into the slots that remain after earlier students were
    placed.

    Fix: removed a leftover ``pdb.set_trace()`` that dropped every call
    into the debugger.
    """
    cost=0
    # Two available slots per dorm, derived from `dorms` instead of the
    # previous hard-coded [0,0,1,1,2,2,3,3,4,4] (same values, but now
    # stays correct if the dorm list changes).
    slots=[]
    for i in range(len(dorms)): slots+=[i,i]
    # Loop over each student
    for i in range(len(vec)):
        x=int(vec[i])
        # The selection of vec in slots is relative to what is left in
        # slots; e.g. vec could hold several 0's, since index 0 always
        # means "first slot still available", which keeps changing.
        dorm=dorms[slots[x]]
        pref=prefs[i][1]
        # First choice costs 0, second choice costs 1, not on the list costs 3
        if pref[0]==dorm: cost+=0
        elif pref[1]==dorm: cost+=1
        else: cost+=3
        # Remove selected slot
        del slots[x]
    return cost
if __name__ == "__main__":
s = optimization.randomoptimize(domain, dormcost)
print(dormcost(s))
s = optimization.geneticoptimize(domain, dormcost)
print(dormcost(s))
| [
"mccarviks@gmail.com"
] | mccarviks@gmail.com |
d3dea0d179f7790f5f123aabbfaac8f1eac7b1f8 | 0a1f8957a798006deaa53d10d09f733fab1e6b05 | /src/CADAssembler/PostProcessing/Calculix_PostProcess.py | 53efc40f6ca44a42b92ac38869812a268b66624d | [
"LicenseRef-scancode-other-permissive"
] | permissive | metamorph-inc/meta-core | a89504ccb1ed2f97cc6e792ba52e3a6df349efef | bc7a05e04c7901f477fe553c59e478a837116d92 | refs/heads/master | 2023-03-07T02:52:57.262506 | 2023-03-01T18:49:49 | 2023-03-01T18:49:49 | 40,361,476 | 25 | 15 | NOASSERTION | 2023-01-13T16:54:30 | 2015-08-07T13:21:24 | Python | UTF-8 | Python | false | false | 5,229 | py | #title :Calculix_PostProcess.py
#description :This script performs post processing on Calculix output files (.frd).
#author :Di Yao
#date :2012-6-19
#version :1.0.0.0
#usage :python pyscript.py
#notes :
#python_version :2.7
#==============================================================================
import sys
import ComputedMetricsSummary
import math
import AnalysisFunctions
import re
import utility_functions
def ParseCalculixOutputFile(feaName):
    """Parse a Calculix ``<feaName>.dat`` file and feed each section to
    CalculateMetrics().

    Sections are headed by 'stresses' or 'displacements'; the header line
    is kept as the first element of the section.  A 'forces' header
    flushes the pending section and its own block is skipped entirely.
    """
    skipKey = False
    sectionData = []
    # 'with' guarantees the file is closed even if CalculateMetrics raises
    # (the original open()/close() pair leaked the handle on error).
    with open(feaName + '.dat', 'r') as f:
        for line in f:
            line = line.strip()
            if line == '':
                continue
            # 'stresses' and 'displacements' headers were handled by two
            # identical branches; merged into one.
            if line.startswith('stresses') or line.startswith('displacements'):
                skipKey = False
                if sectionData:
                    CalculateMetrics(sectionData)
                sectionData = []
            elif line.startswith('forces'):
                skipKey = True
                if sectionData:
                    CalculateMetrics(sectionData)
                sectionData = []
                continue  # do not append the 'forces' header itself
            if not skipKey:
                sectionData.append(line)
    # Flush the trailing section, if any.
    if sectionData:
        CalculateMetrics(sectionData)
def CalculateMetrics(sectionData):
    """Consume one parsed .dat section (header line first) and update the
    matching component in ComputedMetricsSummary.gComponentList.

    'stresses' sections record the max Mises/shear/bearing stress and a
    factor of safety; 'displacements' sections compute per-node
    displacement magnitudes.
    """
    keyLine = sectionData.pop(0)
    if keyLine.startswith('stresses'):
        # Token 5 of the header is the element-set (component) id.
        ELSet_ID = keyLine.split()[5]
        maxMises = 0
        maxShear = 0
        maxBearing = 0
        for data in sectionData:
            splittedLine = data.split()
            stressMatrix = splittedLine[2:]  # stress components for this element
            tmpMise, tmpBear, tmpShear = AnalysisFunctions.FindStressMetrics(stressMatrix)
            maxMises = max(maxMises, tmpMise)
            maxShear = max(maxShear, tmpShear)
            maxBearing = max(maxBearing, tmpBear)
        # FactorOfSafety -- 'k in d' replaces the Python-2-only d.has_key(k),
        # identical behavior under Python 2 and forward-compatible.
        if ELSet_ID in ComputedMetricsSummary.gComponentList:
            tmpComponent = ComputedMetricsSummary.gComponentList[ELSet_ID]
            # NOTE(review): a section whose stresses are all zero would make
            # maxMises == 0 and raise ZeroDivisionError here -- confirm inputs.
            factorOfSafety = float(tmpComponent.MaterialProperty['Mises']) / maxMises
            if 'Shear' in tmpComponent.MetricsInfo:
                tmpComponent.MetricsOutput[tmpComponent.MetricsInfo['Shear']] = maxShear
            if 'Mises' in tmpComponent.MetricsInfo:
                tmpComponent.MetricsOutput[tmpComponent.MetricsInfo['Mises']] = maxMises
            if 'Bearing' in tmpComponent.MetricsInfo:
                tmpComponent.MetricsOutput[tmpComponent.MetricsInfo['Bearing']] = maxBearing
            if 'FactorOfSafety' in tmpComponent.MetricsInfo:
                tmpComponent.MetricsOutput[tmpComponent.MetricsInfo['FactorOfSafety']] = factorOfSafety
            ComputedMetricsSummary.gComponentList[ELSet_ID] = tmpComponent
    elif keyLine.startswith('displacements'):
        # NOTE(review): displacementData is built but never stored or
        # returned -- presumably unfinished; kept for behavioral parity.
        displacementData = dict()
        for data in sectionData:
            splittedLine = data.split()
            displacementData[splittedLine[0]] = AnalysisFunctions.FindDisplacementMagnitude(
                float(splittedLine[1]),
                float(splittedLine[2]),
                float(splittedLine[3]))
if __name__ == '__main__':
try:
feaName = None
paramFile = None
argList = sys.argv
argc = len(argList)
i = 0
while (i < argc):
if (argList[i][:2] == '-i'):
i+=1
feaName = utility_functions.right_trim(argList[i], '.dat')
elif (argList[i][:2] == '-p'):
i+=1
paramFile = argList[i]
i+=1
if not feaName or not paramFile:
exit(1)
ComputedMetricsSummary.ParseXMLFile(paramFile)
ComputedMetricsSummary.PrintComponentList(ComputedMetricsSummary.gComponentList)
ParseCalculixOutputFile(feaName)
ComputedMetricsSummary.WriteXMLFile(ComputedMetricsSummary.gComponentList)
except Exception as e:
print e
print type(e) # prints the type of exception
print type(e).__name__ # prints the type's name
except ZeroDivisionError:
print "division by zero!"
| [
"kevin.m.smyth@gmail.com"
] | kevin.m.smyth@gmail.com |
5e24f92dbeb200f2d413edb16c62470ebe24c5dd | fb5d9f9b4ae3d7059d582ebb390916c2f9528852 | /models/pix2pix_model.py | e60d32a75ef9b52d29453b688af008de8946200f | [] | no_license | tianxiaguixin002/Code-Implementation-of-Super-Resolution-ZOO | 32d4168f4d8d031968b7a601cf68b50730b15b06 | f6ccf309c7653a27173de5184d17bb5933baab14 | refs/heads/master | 2022-11-13T17:09:11.484532 | 2020-07-06T01:51:25 | 2020-07-06T01:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,404 | py | import torch
from .base_model import BaseModel
from . import base_networks
class Pix2PixModel(BaseModel):
    """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.

    The model training requires '--dataset_mode aligned' dataset.
    By default, it uses a '--netG unet256' U-Net generator,
    a '--netD basic' discriminator (PatchGAN),
    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).

    pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
    """
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        For pix2pix, we do not use image buffer.
        The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
        By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
        """
        # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
        parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
        if is_train:
            parser.set_defaults(pool_size=0, gan_mode='vanilla')
            parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')

        return parser

    def __init__(self, opt):
        """Initialize the pix2pix class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']
        # define networks (both generator and discriminator)
        self.netG = base_networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                           not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
            self.netD = base_networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
                                               opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            # define loss functions
            self.criterionGAN = base_networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): include the data itself and its metadata information.
        """
        # NOTE: unlike the upstream pix2pix, this variant has no --direction
        # option; 'A' is always the source domain and 'B' the target.
        self.real_A = input['A'].to(self.device)
        self.real_B = input['B'].to(self.device)
        self.A_paths = input['A_paths']
        self.B_paths = input['B_paths']

    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake_B = self.netG(self.real_A)  # G(A)

    def backward_D(self):
        """Calculate GAN loss for the discriminator"""
        # Fake; stop backprop to the generator by detaching fake_B
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)  # we use conditional GANs; we need to feed both input and output to the discriminator
        pred_fake = self.netD(fake_AB.detach())
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        # Real
        real_AB = torch.cat((self.real_A, self.real_B), 1)
        pred_real = self.netD(real_AB)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        # combine loss and calculate gradients
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        self.loss_D.backward()

    def backward_G(self):
        """Calculate GAN and L1 loss for the generator"""
        # First, G(A) should fake the discriminator
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)
        pred_fake = self.netD(fake_AB)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        # Second, G(A) = B
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
        # combine loss and calculate gradients
        self.loss_G = self.loss_G_GAN + self.loss_G_L1
        self.loss_G.backward()

    def optimize_parameters(self):
        """One training iteration: forward pass, then update D, then update G."""
        self.forward()                   # compute fake images: G(A)
        # update D
        self.set_requires_grad(self.netD, True)  # enable backprop for D
        self.optimizer_D.zero_grad()     # set D's gradients to zero
        self.backward_D()                # calculate gradients for D
        self.optimizer_D.step()          # update D's weights
        # update G
        self.set_requires_grad(self.netD, False)  # D requires no gradients when optimizing G
        self.optimizer_G.zero_grad()     # set G's gradients to zero
        self.backward_G()                # calculate gradients for G
        self.optimizer_G.step()          # update G's weights
| [
"chenyx.cs@gmail.com"
] | chenyx.cs@gmail.com |
8e32439681edbd47329d1474f20b17008dc11dd4 | ff8ec937d9e5bef6d527f91ec4c8a2248063e9f8 | /Flask_Projects/HuntingBallApp/config/config.py | dcbf3f6a8a86d1b4f003e229f2c60d94f9750b65 | [] | no_license | zyxyuanxiao/Python-Framework-Study-Resources | 3c7743946b828dbd4c0a5b530363d36e54319e9c | cff0f9cefa36afa9fb43f0af5478b7428795d718 | refs/heads/master | 2020-09-04T15:00:06.987122 | 2019-08-19T10:07:29 | 2019-08-19T10:07:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,080 | py | # -*- coding: utf-8 -*-
# @Time : 2018/10/20 下午4:02
# @Author : ShaHeTop-Almighty-ares
# @Email : yang6333yyx@126.com
# @File : config.py
# @Software: PyCharm
from datetime import timedelta
import redis
import os
def app_conf():
    """Select the configuration key from the FLASK_ENV environment variable.

    Returns:
        str: 'development' when FLASK_ENV is exactly 'development';
        'production' in every other case, including when the variable is
        unset or has an unknown value (this mirrors Flask's own behaviour,
        where a missing FLASK_ENV defaults to production).

    NOTE: when launching from an IDE such as PyCharm the variable exported
    in a shell may not be visible; set FLASK_ENV in the run configuration.
    """
    # The original had three branches, two of which returned 'production';
    # collapsed here without changing behaviour.
    if os.environ.get('FLASK_ENV') == 'development':
        return 'development'
    return 'production'
class BaseConfig:
    """Configuration base class: shared defaults for all environments."""
    # SECRET_KEY = os.urandom(24)
    SECRET_KEY = 'ShaHeTop-Almighty-ares'  # session signing key
    PERMANENT_SESSION_LIFETIME = timedelta(days=30)  # session expiry time
    DEBUG = True
    # SERVER_NAME = 'example.com'
    RUN_HOST = '0.0.0.0'
    RUN_PORT = 9999

    """Mysql"""
    HOSTNAME = '127.0.0.1'
    PORT = '3306'
    USERNAME = 'root'
    PASSWORD = '123456'
    DATABASE = 'HuntingBallApp'
    # &autocommit=true
    # NOTE: DB_URI is evaluated once at class-body time, so subclasses that
    # override PASSWORD etc. must rebuild it themselves.
    DB_URI = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8'.format(
        USERNAME,
        PASSWORD,
        HOSTNAME,
        PORT,
        DATABASE)
    SQLALCHEMY_DATABASE_URI = DB_URI
    SQLALCHEMY_TRACK_MODIFICATIONS = True

    """Redis"""
    # host is the redis server; both server and client must be running.
    # Redis listens on the default port 6379.  The pool is created at import
    # time (connections are only opened lazily on first use).
    REDIS_PWD = 123456
    POOL = redis.ConnectionPool(host='localhost', port=6379, password=REDIS_PWD, decode_responses=True, db=1)
    R = redis.Redis(connection_pool=POOL)

    @staticmethod
    def init_app(app):
        # Hook for app-specific initialisation; intentionally a no-op here.
        pass
class ProductionConfig(BaseConfig):
    """Production environment overrides."""
    DEBUG = False
    RUN_PORT = 5000
    PASSWORD = 'okcokc111111'  # mysql
    REDIS_PWD = 'okc1111'  # redis
    # BaseConfig.DB_URI was built at class-body time with the development
    # PASSWORD, so merely overriding PASSWORD above had no effect on the
    # connection string; rebuild it so production actually uses this password.
    DB_URI = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8'.format(
        BaseConfig.USERNAME,
        PASSWORD,
        BaseConfig.HOSTNAME,
        BaseConfig.PORT,
        BaseConfig.DATABASE)
    SQLALCHEMY_DATABASE_URI = DB_URI
    # NOTE(review): BaseConfig.POOL / R were likewise created with the
    # development REDIS_PWD; rebuilding them here would open a second pool,
    # so that is left for a deliberate follow-up -- confirm intended setup.
class DevelopmentConfig(BaseConfig):
    """Development environment: inherits every BaseConfig default unchanged."""
    pass
# Map FLASK_ENV values (as returned by app_conf()) to configuration classes.
config_obj = {
    'production': ProductionConfig,
    'development': DevelopmentConfig,
    'default': DevelopmentConfig
}
if __name__ == '__main__':
print(config_obj['default'].DB_URI) | [
"yang6333yyx@126.com"
] | yang6333yyx@126.com |
d163d193ab625d2cae6dc04b0724d037904fd11f | 8487cb41afd00c9cc30402fd3f06e7f52650669e | /python/Run2016G-Nano14Dec2018-v1/MET_cff.py | 6daabedbe6cf3b8f2b2d7082e12dc2e3ef26cd34 | [] | no_license | TreeMaker/ecalBadCalibFilterLists | bf7fd35aee6bccc90b214542e163a94c3f3749f3 | 1e43aff994dff79d768b7b9d284aab6cb951e864 | refs/heads/master | 2020-04-24T05:58:37.174381 | 2019-03-13T20:58:15 | 2019-03-13T20:58:15 | 171,749,544 | 0 | 2 | null | 2019-03-13T20:58:16 | 2019-02-20T21:08:38 | C | UTF-8 | Python | false | false | 1,911 | py | import FWCore.ParameterSet.Config as cms
# Standard CMSSW PoolSource boilerplate: process every event (-1) from the
# primary files listed in readFiles; no secondary files are used.
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/259999CE-35DF-FC40-94F3-AF083D3D615B.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/2FFD045A-0E81-494D-A1C5-5D05A7D261F9.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/36B5871A-2642-C645-B678-000A44EEA080.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/3E28CDD2-923E-7B4B-8347-B2A3A77D9295.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/3EAE39E3-9984-3347-B36D-F780D8D9EE42.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/5CB2D2AB-E077-3F44-A856-56382B8D37FE.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/66DE259F-79E4-3E4A-B321-EAE3541F4E63.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/69885CC9-D601-5D4D-94BA-A89F4A523567.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/783BB280-F41E-9245-9789-0E2D4D917A4C.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/8461E89F-62D0-F343-83A2-46966D193DD2.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/A442A143-884E-0148-BB51-9BBCF5FD6BA5.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/AA1B8977-17ED-864B-8F4E-0777D0FAD48C.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/B5830E7A-9B91-3C40-A94B-2E277526DA74.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/DB06E5F5-DF77-DE4D-A57B-61AC72A034CE.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/E635B26B-BBF9-9346-A1D5-C68388711262.root',
] )
| [
"Alexx.Perloff@Colorado.edu"
] | Alexx.Perloff@Colorado.edu |
580a57ae46929c6900e795d79b5db84f2c334313 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/158/48878/submittedfiles/testes.py | 8ce6c6878e8f49b7c1aa558cb328afafad3ddb5b | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Read the factory cost and add the dealer margin (28%) and taxes (45%).
cf = float(input('custo de fábrica:'))
# Float literals instead of 28/100 and 45/100: the file is marked
# python 2.7, where those divisions truncate to 0; on Python 3 the
# result is identical.
cc = cf + 0.28 * cf + 0.45 * cf
print('o valor final do carro é: %.2f' % cc)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f9f48d29e03e63e50b03d5d8890ec5867a25678f | 29fba3f7c37c05ed466e610c8e95570e78456a3e | /ops.py | 073febf4bfc8f91c1502b0d52456f9d84d567e4e | [
"MIT"
] | permissive | fendaq/CartoonGAN-Tensorflow | 079dc00773fee5159e3f8a5b06d6371c2d014f2a | 4130160671c9690f2e7079709ed897a9678259bf | refs/heads/master | 2020-03-24T02:37:18.440485 | 2018-07-26T02:19:01 | 2018-07-26T02:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,251 | py | import tensorflow as tf
import tensorflow.contrib as tf_contrib
from vgg19 import Vgg19
# Xavier : tf_contrib.layers.xavier_initializer()
# He : tf_contrib.layers.variance_scaling_initializer()
# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)
# l2_decay : tf_contrib.layers.l2_regularizer(0.0001)
weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
weight_regularizer = None
##################################################################################
# Layer
##################################################################################
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'):
    """2-D convolution with optional zero/reflect padding and spectral norm.

    When ``sn`` is True the kernel is created manually and wrapped with
    spectral_norm() (padding 'VALID', since we pad explicitly above);
    otherwise the stock tf.layers.conv2d is used.
    """
    with tf.variable_scope(scope):
        if pad_type == 'zero' :
            x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
        if pad_type == 'reflect' :
            x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')

        if sn :
            w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels], initializer=weight_init,
                                regularizer=weight_regularizer)
            x = tf.nn.conv2d(input=x, filter=spectral_norm(w),
                             strides=[1, stride, stride, 1], padding='VALID')
            if use_bias :
                bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
        else :
            x = tf.layers.conv2d(inputs=x, filters=channels,
                                 kernel_size=kernel, kernel_initializer=weight_init,
                                 kernel_regularizer=weight_regularizer,
                                 strides=stride, use_bias=use_bias)

        return x
def deconv(x, channels, kernel=4, stride=2, use_bias=True, sn=False, scope='deconv_0'):
    """Transposed 2-D convolution (upsampling by ``stride``), optional spectral norm.

    NOTE: the manual sn branch derives output_shape from the static input
    shape, so it requires a fully-defined (non-None) batch/height/width.
    """
    with tf.variable_scope(scope):
        x_shape = x.get_shape().as_list()
        output_shape = [x_shape[0], x_shape[1]*stride, x_shape[2]*stride, channels]
        if sn :
            w = tf.get_variable("kernel", shape=[kernel, kernel, channels, x.get_shape()[-1]], initializer=weight_init, regularizer=weight_regularizer)
            x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w), output_shape=output_shape, strides=[1, stride, stride, 1], padding='SAME')

            if use_bias :
                bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
        else :
            x = tf.layers.conv2d_transpose(inputs=x, filters=channels,
                                           kernel_size=kernel, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer,
                                           strides=stride, padding='SAME', use_bias=use_bias)

        return x
##################################################################################
# Residual-block
##################################################################################
def resblock(x_init, channels, use_bias=True, scope='resblock_0'):
    """Residual block: conv-IN-relu, conv-IN, plus identity skip connection.

    ``channels`` must equal the channel count of ``x_init`` for the final
    addition to be valid.
    """
    with tf.variable_scope(scope):
        with tf.variable_scope('res1'):
            x = conv(x_init, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias)
            x = instance_norm(x)
            x = relu(x)

        with tf.variable_scope('res2'):
            x = conv(x, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias)
            x = instance_norm(x)

        return x + x_init
##################################################################################
# Sampling
##################################################################################
def flatten(x) :
    """Flatten all dimensions except the batch dimension."""
    return tf.layers.flatten(x)

##################################################################################
# Activation function
##################################################################################

def lrelu(x, alpha=0.2):
    """Leaky ReLU with negative slope ``alpha``."""
    return tf.nn.leaky_relu(x, alpha)


def relu(x):
    return tf.nn.relu(x)


def tanh(x):
    return tf.tanh(x)

def sigmoid(x) :
    return tf.sigmoid(x)
##################################################################################
# Normalization function
##################################################################################
def instance_norm(x, scope='instance_norm'):
    """Instance normalization (thin wrapper over tf.contrib.layers)."""
    return tf_contrib.layers.instance_norm(x,
                                           epsilon=1e-05,
                                           center=True, scale=True,
                                           scope=scope)

def layer_norm(x, scope='layer_norm') :
    """Layer normalization (thin wrapper over tf.contrib.layers)."""
    return tf_contrib.layers.layer_norm(x,
                                        center=True, scale=True,
                                        scope=scope)

def batch_norm(x, is_training=True, scope='batch_norm'):
    """Batch normalization; updates moving stats in place (updates_collections=None)."""
    return tf_contrib.layers.batch_norm(x,
                                        decay=0.9, epsilon=1e-05,
                                        center=True, scale=True, updates_collections=None,
                                        is_training=is_training, scope=scope)
def spectral_norm(w, iteration=1):
    """Spectral normalization: divide ``w`` by an estimate of its largest
    singular value, obtained via power iteration.

    The left-singular iterate ``u`` is stored in a non-trainable variable so
    the estimate is warm-started across training steps.
    """
    w_shape = w.shape.as_list()
    # Work on a 2-D view: all leading dims folded into rows, channels as cols.
    w = tf.reshape(w, [-1, w_shape[-1]])

    u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.truncated_normal_initializer(), trainable=False)

    u_hat = u
    v_hat = None
    for i in range(iteration):
        """
        power iteration
        Usually iteration = 1 will be enough
        """
        v_ = tf.matmul(u_hat, tf.transpose(w))
        v_hat = l2_norm(v_)

        u_ = tf.matmul(v_hat, w)
        u_hat = l2_norm(u_)

    # Rayleigh-quotient estimate of the top singular value.
    sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
    w_norm = w / sigma

    # Persist the updated iterate before the normalized kernel is used.
    with tf.control_dependencies([u.assign(u_hat)]):
        w_norm = tf.reshape(w_norm, w_shape)

    return w_norm


def l2_norm(v, eps=1e-12):
    """Scale ``v`` to unit L2 norm; ``eps`` guards against division by zero."""
    return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)
##################################################################################
# Loss function
##################################################################################
def L1_loss(x, y):
    """Mean absolute error between two tensors of the same shape."""
    loss = tf.reduce_mean(tf.abs(x - y))

    return loss
def discriminator_loss(loss_func, real, fake, real_blur):
    """Three-term discriminator loss selected by ``loss_func``.

    ``real``/``fake``/``real_blur`` are discriminator logits; ``real_blur``
    is treated as a second fake class (presumably edge-smoothed cartoon
    images, CartoonGAN's edge-promoting term -- confirm with the caller).
    Supported values: 'wgan-gp', 'wgan-lp', 'lsgan', 'gan', 'dragan', 'hinge'.
    Any other value returns 0 + 0 + 0.
    """
    real_loss = 0
    fake_loss = 0
    real_blur_loss = 0

    if loss_func == 'wgan-gp' or loss_func == 'wgan-lp':
        # NOTE: the gradient-penalty term is not computed here.
        real_loss = -tf.reduce_mean(real)
        fake_loss = tf.reduce_mean(fake)
        real_blur_loss = tf.reduce_mean(real_blur)

    if loss_func == 'lsgan' :
        real_loss = tf.reduce_mean(tf.square(real - 1.0))
        fake_loss = tf.reduce_mean(tf.square(fake))
        real_blur_loss = tf.reduce_mean(tf.square(real_blur))

    if loss_func == 'gan' or loss_func == 'dragan' :
        real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real), logits=real))
        fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake), logits=fake))
        real_blur_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(real_blur), logits=real_blur))

    if loss_func == 'hinge':
        real_loss = tf.reduce_mean(relu(1.0 - real))
        fake_loss = tf.reduce_mean(relu(1.0 + fake))
        real_blur_loss = tf.reduce_mean(relu(1.0 + real_blur))

    loss = real_loss + fake_loss + real_blur_loss

    return loss
def generator_loss(loss_func, fake):
    """Adversarial generator loss on discriminator logits ``fake``.

    Supported values: 'wgan-gp', 'wgan-lp', 'lsgan', 'lsdragan',
    'lsdragan-lp', 'gan', 'dragan', 'hinge'; any other value returns 0.
    """
    fake_loss = 0

    if loss_func == 'wgan-gp' or loss_func == 'wgan-lp':
        fake_loss = -tf.reduce_mean(fake)

    if loss_func == 'lsgan' or loss_func == 'lsdragan' or loss_func == 'lsdragan-lp':
        fake_loss = tf.reduce_mean(tf.square(fake - 1.0))

    if loss_func == 'gan' or loss_func == 'dragan':
        fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake), logits=fake))

    if loss_func == 'hinge':
        fake_loss = -tf.reduce_mean(fake)

    loss = fake_loss

    return loss
def vgg_loss(real, fake):
    """Perceptual loss: L1 distance between VGG19 conv4_4 feature maps.

    NOTE(review): a fresh Vgg19 is constructed (and 'vgg19.npy' reloaded)
    on every call; hoisting it to the caller would avoid repeated loads.
    """
    vgg = Vgg19('vgg19.npy')

    vgg.build(real)
    real_feature_map = vgg.conv4_4_no_activation

    vgg.build(fake)
    fake_feature_map = vgg.conv4_4_no_activation

    loss = L1_loss(real_feature_map, fake_feature_map)

    return loss
| [
"takis0112@gmail.com"
] | takis0112@gmail.com |
3d34d9f074f5342a1d3e3ab80a45924e4c7a2f2a | 71711bd2c11a3c0cbbc99bcfa78384d005e07828 | /base_mcts/two_voice_uct_mcts.py | f692c0814c9e78247b25884fe720f897b93d450a | [
"BSD-3-Clause"
] | permissive | kastnerkyle/exploring_species_counterpoint | 9365b2485cd227e375521f769ba1bfbd62c7b629 | dda762463e64036adeba7efd46c51daaaf906019 | refs/heads/master | 2021-09-13T10:55:03.096300 | 2018-04-28T19:00:21 | 2018-04-28T19:00:21 | 103,225,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,087 | py | # Author: Kyle Kastner
# License: BSD 3-Clause
# http://mcts.ai/pubs/mcts-survey-master.pdf
# See similar implementation here
# https://github.com/junxiaosong/AlphaZero_Gomoku
# changes from high level pseudo-code in survey
# expand all children, but only rollout one
# section biases to unexplored nodes, so the children with no rollout
# will be explored quickly
import numpy as np
import copy
from dataset_wrap import two_voice_species1_wrap
from analysis import analyze_two_voices
from shared_mcts import MCTS
# Dataset-derived lookup tables.
# all_l: guide traces (cantus firmus lines); l_map / p_map: label and pitch
# encodings from the dataset wrapper.
all_l, l_map, p_map, all_i = two_voice_species1_wrap()
l_inv_map = {v: k for k, v in l_map.items()}
p_inv_map = {v: k for k, v in p_map.items()}

# Build the action vocabulary: keep only positive intervals (k > 0) and give
# each one a dense index.  j_acts_map: action index -> interval;
# j_acts_inv_map: interval -> action index.
va_p = [p_map[k] for k in sorted(p_map.keys())]
j_map = {p_inv_map[k1]: k1 for k1 in va_p}
j_map = {k: v for k, v in j_map.items() if k > 0}
j_inv_map = {v: k for k, v in j_map.items()}
j_acts_map = {k: v for k, v in enumerate(sorted(j_map.keys()))}
j_acts_inv_map = {v: k for k, v in j_acts_map.items()}
class TwoVoiceSpecies1Manager(object):
    """Game-style state manager exposing species 1 counterpoint to MCTS.

    A state is a pair ``[top, bot]``: ``bot`` is the fixed guide trace
    (cantus firmus) and ``top`` is the list of interval choices made so far,
    one per completed step.  Actions are dense indices into ``j_acts_map``.
    """
    def __init__(self, guide_index, default_mode="C", offset_value=60, tonality="-", rollout_limit=1000):
        self.default_mode = default_mode
        # Added to the guide trace to shift it into MIDI pitch range.
        self.offset_value = offset_value
        # M or m or - major or minor or any tonality
        self.tonality = tonality
        self.guide_trace = all_l[guide_index]
        # Fixed seed so rollouts are reproducible.
        self.random_state = np.random.RandomState(1999)
        self.rollout_limit = rollout_limit

    def get_next_state(self, state, action):
        act = j_acts_map[action]
        new_state = [state[0] + [act], state[1]]
        return new_state

    def get_action_space(self):
        return list(range(len(j_acts_map.keys())))

    def get_valid_actions(self, state):
        """Return the action indices allowed from ``state``.

        Applies tonality constraints, forbids voice crossing / seconds
        (interval <= 2) and, after the first note, leaps larger than a 4th.
        """
        # (unused s0/s1 array conversions from the original were removed)
        if self.tonality == "M":
            # disallow minor 3rds and 6ths
            disallowed = [3, 8, 15, 20, 27]
        elif self.tonality == "m":
            # disallow major 3rds and 6ths
            disallowed = [4, 9, 16, 21, 28]
        elif self.tonality == "-":
            disallowed = []
        else:
            raise ValueError("self.tonality setting {} not understood".format(self.tonality))

        if len(state[0]) == 0:
            # for first notes, keep it pretty open
            va_p = [p_map[k] for k in sorted(p_map.keys())]
            acts_i = [p for p in va_p]
            acts = [p_inv_map[a] for a in acts_i]

            # no voice crossing, m/M2
            acts = [a for a in acts if a > 2]

            # remove combinations violating tonality
            acts = [a for a in acts if a not in disallowed]

            acts_r = [j_acts_inv_map[a] for a in acts if a in j_acts_inv_map]
            return acts_r
        else:
            va_p = [p_map[k] for k in sorted(p_map.keys())]
            acts_i = [p for p in va_p]
            acts = [p_inv_map[a] for a in acts_i]

            # no leaps of greater than a 4th
            acts = [a for a in acts if abs(a - state[0][-1]) <= 5]

            # no voice crossing, m/M2
            acts = [a for a in acts if a > 2]

            # remove combinations violating tonality
            acts = [a for a in acts if a not in disallowed]

            acts_r = [j_acts_inv_map[a] for a in acts if a in j_acts_inv_map]
            return acts_r

    def get_init_state(self):
        # need to inspect the others...
        top = []
        bot = self.guide_trace
        return copy.deepcopy([top, bot])

    def _rollout_fn(self, state):
        # Uniform random choice among the currently valid actions.
        return self.random_state.choice(self.get_valid_actions(state))

    def _score(self, state):
        """Heuristic score for a completed-so-far state (higher is better)."""
        s0 = np.array(state[0])
        s1 = np.array(state[1])
        bot = s1 + self.offset_value
        top = bot[:len(s0)] + s0
        # NOTE(review): unique_count / unique_max are computed but currently
        # excluded from the returned score (see the commented-out sum below).
        unique_count = len(set(list(s0))) / float(len(s0))
        smooth_s = 1. / np.sum(np.abs(np.diff(top)))
        unique_max = 1. / float(len(np.where(top == np.max(top))[0]))
        return smooth_s #+ unique_max + unique_count

    def rollout_from_state(self, state):
        """Play random valid moves from ``state`` and return a terminal value."""
        s = state
        w, sc, e = self.is_finished(state)
        if e:
            if w == -1:
                return -1
            elif w == 0:
                return sc
            else:
                return self._score(s)

        c = 0
        while True:
            a = self._rollout_fn(s)
            s = self.get_next_state(s, a)
            w, sc, e = self.is_finished(s)
            c += 1
            if e:
                if w == -1:
                    return -1
                elif w == 0:
                    return sc
                else:
                    return self._score(s)

            if c > self.rollout_limit:
                # Rollout budget exhausted without reaching a terminal state.
                return 0.

    def is_finished(self, state):
        """Return (winner, score, ended): winner is 1 / -1 / 0 (undecided)."""
        if len(self.get_valid_actions(state)) == 0:
            # Dead end: no legal continuation.
            return -1., -1., True

        if len(state[0]) == 0:
            # nothing has happened yet
            return 0, 0., False

        # Pad the top voice with zeros so both voices have equal length,
        # then convert intervals to absolute MIDI pitches for analysis.
        ns0 = state[0] + [0] * (len(state[1]) - len(state[0]))
        s_l = [ns0, state[1]]
        s = np.array(s_l)
        s[1, :] += self.offset_value
        s[0, :] += s[1, :]

        parts = s
        durations = [['4'] * len(p) for p in parts]
        key_signature = "C"
        time_signature = "4/4"
        # add caching here?
        aok = analyze_two_voices(parts, durations, key_signature, time_signature,
                                 species="species1_minimal", cantus_firmus_voices=[1])
        if len(aok[1]["False"]) > 0:
            first_error = aok[1]["False"][0]
        else:
            first_error = np.inf

        if len(state[0]) < len(state[1]):
            # error is out of our control (in the padded notes)
            if first_error > (len(state[0]) - 1):
                return 0, 0., False
            else:
                # made a mistake
                return 0, -1. + len(state[0]) / float(len(state[1])), True
        elif aok[0]:
            return 1, 1., True
        else:
            return -1, -1., True
else:
return -1, -1., True
if __name__ == "__main__":
    # Sampling driver: for every guide trace, run MCTS until one valid
    # counterpoint line is found, then dump all results to MIDI and lilypond.
    import time
    from visualization import pitches_and_durations_to_pretty_midi
    from visualization import plot_pitches_and_durations
    from analysis import fixup_parts_durations
    from analysis import intervals_from_midi

    all_parts = []
    all_durations = []
    mcts_random = np.random.RandomState(1110)
    for guide_idx in range(len(all_l)):
        tvsp1m = TwoVoiceSpecies1Manager(guide_idx)
        mcts = MCTS(tvsp1m, n_playout=1000, random_state=mcts_random)
        resets = 0
        n_valid_samples = 0
        valid_state_traces = []
        # temp / noise are only consumed by the commented-out sample_action
        # call below; get_action() ignores them.
        temp = 1.
        noise = True
        while True:
            if n_valid_samples >= 1:
                print("Got a valid sample")
                break
            resets += 1
            if resets > 30:
                # After many failed restarts, switch to near-greedy selection.
                temp = 1E-3
                noise = False
            state = mcts.state_manager.get_init_state()
            winner, score, end = mcts.state_manager.is_finished(state)
            states = [state]

            while True:
                if not end:
                    print("guide {}, step {}, resets {}".format(guide_idx, len(states), resets))
                    #print(state)
                    #a, ap = mcts.sample_action(state, temp=temp, add_noise=noise)
                    a, ap = mcts.get_action(state)

                    if a is None:
                        print("Ran out of valid actions, stopping early at step {}".format(len(states)))
                        valid_state_traces.append(states[-1])
                        n_valid_samples += 1
                        break
                    # Debug dump of the root's children statistics.
                    for i in mcts.root.children_.keys():
                        print(i, mcts.root.children_[i].__dict__)
                    print("")
                    print(state)
                    mcts.update_tree_root(a)
                    state = mcts.state_manager.get_next_state(state, a)
                    states.append(state)
                    winner, score, end = mcts.state_manager.is_finished(state)
                else:
                    mcts.reconstruct_tree()
                    print(state)
                    if len(states) > 1 and len(states[-1][0]) == len(states[-1][1]):
                        print("Got to the end")
                        n_valid_samples += 1
                        valid_state_traces.append(states[-1])
                        break
                    else:
                        print("Finished in {} steps".format(len(states)))
                        break

        # Convert the accepted state back into absolute-pitch parts.
        s = valid_state_traces[0]
        s0 = np.array(s[0])
        s1 = np.array(s[1])
        bot = s1 + mcts.state_manager.offset_value
        bot = bot[:len(s0)]
        top = bot + s0
        parts = [list(top), list(bot)]
        durations = [['4'] * len(p) for p in parts]
        durations = [[int(di) for di in d] for d in durations]
        interval_figures = intervals_from_midi(parts, durations)
        _, interval_durations = fixup_parts_durations(parts, durations)
        all_parts.append(parts)
        all_durations.append(durations)
        print("completed {}".format(guide_idx))
    # now dump samples
    pitches_and_durations_to_pretty_midi(all_parts, all_durations,
                                         save_dir="two_voice_puct_mcts_samples",
                                         name_tag="two_voice_puct_mcts_sample_{}.mid",
                                         default_quarter_length=240,
                                         voice_params="piano")
    key_signature = "C"
    time_signature = "4/4"
    clefs = ["treble", "bass"]
    plot_pitches_and_durations(all_parts, all_durations,
                               save_dir="two_voice_puct_mcts_plots",
                               name_tag="two_voice_puct_mcts_plot_{}.ly")
                               #interval_figures=interval_figures,
                               #interval_durations=interval_durations,
                               #use_clefs=clefs)

    # add caching here?
    # minimal check during rollout
    # NOTE(review): debugging breakpoint left in -- the script always drops
    # into IPython and then raises ValueError after writing its outputs.
    from IPython import embed; embed(); raise ValueError()
| [
"kastnerkyle@gmail.com"
] | kastnerkyle@gmail.com |
346d53ab383825b68c08ad5965f9c6b063709893 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_dampers.py | 3dcd5ff2c852940976e53da2f7db1e902d48cf0b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#class header
class _DAMPERS():
def __init__(self,):
self.name = "DAMPERS"
self.definitions = damper
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['damper']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ed089dd7e4ef4bb26dae77576405c261a91ab7f2 | f8f2536fa873afa43dafe0217faa9134e57c8a1e | /aliyun-python-sdk-dms-enterprise/aliyunsdkdms_enterprise/request/v20181101/ListDatabaseUserPermssionsRequest.py | 1f27eb7699a8380d923ac31a72e1792a68f5f12e | [
"Apache-2.0"
] | permissive | Sunnywillow/aliyun-openapi-python-sdk | 40b1b17ca39467e9f8405cb2ca08a85b9befd533 | 6855864a1d46f818d73f5870da0efec2b820baf5 | refs/heads/master | 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 | NOASSERTION | 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null | UTF-8 | Python | false | false | 2,355 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdms_enterprise.endpoint import endpoint_data
class ListDatabaseUserPermssionsRequest(RpcRequest):
	"""Auto-generated RPC request for the DMS Enterprise
	``ListDatabaseUserPermssions`` API (version 2018-11-01).

	NOTE: the misspelling "Permssions" matches the actual API action name
	string below and must not be corrected here.  Each get_/set_ pair
	mirrors one query parameter of the API.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'dms-enterprise', '2018-11-01', 'ListDatabaseUserPermssions','dmsenterprise')

		# Install endpoint routing data when the base class supports it.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())


	def get_PermType(self):
		return self.get_query_params().get('PermType')

	def set_PermType(self,PermType):
		self.add_query_param('PermType',PermType)

	def get_DbId(self):
		return self.get_query_params().get('DbId')

	def set_DbId(self,DbId):
		self.add_query_param('DbId',DbId)

	def get_PageSize(self):
		return self.get_query_params().get('PageSize')

	def set_PageSize(self,PageSize):
		self.add_query_param('PageSize',PageSize)

	def get_Logic(self):
		return self.get_query_params().get('Logic')

	def set_Logic(self,Logic):
		self.add_query_param('Logic',Logic)

	def get_Tid(self):
		return self.get_query_params().get('Tid')

	def set_Tid(self,Tid):
		self.add_query_param('Tid',Tid)

	def get_PageNumber(self):
		return self.get_query_params().get('PageNumber')

	def set_PageNumber(self,PageNumber):
		self.add_query_param('PageNumber',PageNumber)

	def get_UserName(self):
		return self.get_query_params().get('UserName')

	def set_UserName(self,UserName):
		self.add_query_param('UserName',UserName)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
f6f93bec6a9ed313c15f650bd437670efc3c85ce | 44ba7f2c3e396ab2c58ce42763da5c18f5d0db4b | /ethicml/implementations/svm.py | 85c8de88e2b577e559d2c95422816287de183b90 | [] | no_license | anonymous-iclr-3518/code-for-submission | 99e45110d2377c08433b619afb9c14cf645be5b0 | 3aecb7642d9611ae0a61cd47948931f8f47b6f76 | refs/heads/main | 2023-01-13T18:27:03.728542 | 2020-11-25T15:21:49 | 2020-11-25T15:21:49 | 315,338,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | """Implementation of SVM (actually just a wrapper around sklearn)."""
from pathlib import Path
import numpy as np
from sklearn.svm import SVC, LinearSVC
from .utils import InAlgoArgs
class SvmArgs(InAlgoArgs):
    """Commandline arguments for SVM."""

    # Regularization strength, forwarded to sklearn as ``C``.
    c: float
    # Kernel name, e.g. "linear" or "rbf"; selects LinearSVC vs SVC in main().
    kernel: str
def main():
    """Train an SVM on the given .npz data and save hard predictions."""
    args = SvmArgs().parse_args()

    # Load training features/labels and test features from .npz archives.
    with open(args.train, "rb") as fh:
        archive = np.load(fh)
        x_train = archive["x"]
        y_train = archive["y"]
    with open(args.test, "rb") as fh:
        x_test = np.load(fh)["x"]

    # The linear kernel uses the dedicated LinearSVC implementation.
    if args.kernel == "linear":
        model = LinearSVC(C=args.c, dual=False, tol=1e-12, random_state=888)
    else:
        model = SVC(C=args.c, kernel=args.kernel, gamma="auto", random_state=888)

    model.fit(x_train, y_train.ravel())
    np.savez(Path(args.predictions), hard=model.predict(x_test))
if __name__ == "__main__":
main()
| [
"anon@ymo.us"
] | anon@ymo.us |
e367812cc0beb8b25b485671395f92b4d26a3224 | 9aaa5eccdb29909c48de9f03732d598fa66920e5 | /binheap.py | 1c50eeecfb93a62782851c8d25b5c828fd75b419 | [
"MIT"
] | permissive | vector8188/AlgorithmAnalysisPython | 76e09126be0654c9eca0a53b6153129bf8beff46 | 026ca8bf846a504c5eae1677680306b0462b49b9 | refs/heads/master | 2021-01-22T13:23:39.617256 | 2018-09-14T13:54:21 | 2018-09-14T13:54:21 | 100,664,673 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,269 | py | class BinHeap:
"""Class for implementing BinHeap."""
def __init__(self):
"""Bin heap constructor."""
self.heapList = [0]
self.currentSize = 0
def percUp(self, i):
"""Checks if the newly entered item is greater/lesser than parent."""
while i > 0:
if self.heapList[i] < self.heapList[i//2]:
# if the i//2'th element(parent) is > i(children) then swap.
# which means if the parent > chilren, we need to maintain
# valid heap datastructure where parent are always less than
# chilren.
tmp = self.heapList[i//2]
self.heapList[i//2] = self.heapList[i]
self.heapList[i] = tmp
i = i//2
def insert(self, k):
self.heapList.append(k)
self.currentSize = self.currentSize + 1
self.percUp(self.currentSize)
def percDown(self, i):
print("i --> {}".format(i))
result = i*2 <= self.currentSize
print("Testing {} <= {} --> {}".format(i*2, self.currentSize, result))
while i*2 <= self.currentSize:
print("heaplist before swap is {}".format(self.heapList))
mc = self.minChild(i)
print("mc <-- {}".format(mc))
print(
"self.heapList[{}] is compared with self.heapList[{}]".format(
i, mc))
if self.heapList[i] > self.heapList[mc]:
print(
"self.heapList[{}]-->({}) > self.heapList[{}] --> ({})".
format(i, self.heapList[i], mc, self.heapList[mc]))
tmp = self.heapList[i]
self.heapList[i] = self.heapList[mc]
self.heapList[mc] = tmp
print("heaplist after swap is {}".format(self.heapList))
print("i <-- mc".format(mc))
i = mc
result = i*2 <= self.currentSize
print("i --> {} \n".format(i))
print("Testing {} <= {} --> {}".format(
i*2, self.currentSize, result))
def minChild(self, i):
print("Evaluating minimum of two leaf nodes")
if i*2+1 > self.currentSize:
print("i*2+1-->({}) > self.currentSize-->({})".format(
i*2+1, self.currentSize))
return i*2
else:
print(
"self.heapList[{}] is compared with self.heapList[{}]".format(
i*2+1, i*2))
if self.heapList[i*2+1] > self.heapList[i*2]:
print(
"self.heapList[{}] --> {} is > self.heapList[{}] --> {}".
format(
i*2+1, self.heapList[i*2+1], i*2, self.heapList[i*2]))
print(
"returning {} as an index of child with lesser value".
format(i*2))
return i*2
else:
print(
"self.heapList[{}] --> {} is > self.heapList[{}] --> {}".
format(
i*2, self.heapList[i*2], i*2+1, self.heapList[i*2+1]))
print(
"returning {} as an index of child with lesser value".
format(i*2+1))
return i*2+1
def delMin(self):
print("hepList before deletion: {}".format(self.heapList))
# select lowest element in the heap, which is root.
retval = self.heapList[1]
# select the last item in the list and move it in the front
# in place of root, retval already have same value.
self.heapList[1] = self.heapList[self.currentSize]
# reduce the size of currentSize by one.
self.currentSize = self.currentSize - 1
# pop the highest item in the list
self.heapList.pop()
# percolate down the fatty, sink motherfucker sink.
self.percDown(1)
print("hepList after deletion: {}".format(self.heapList))
return retval
def buildHeap(self, alist):
i = len(alist)//2
self.currentSize = len(alist)
self.heapList = [0] + alist[:]
while (i > 0):
self.percDown(i)
i = i - 1
# Demo: build a heap from an unsorted list, then remove the minimum element.
bh = BinHeap()
bh.buildHeap([9,5,6,2,3])
bh.delMin()
| [
"vaibhav.rbs@gmail.com"
] | vaibhav.rbs@gmail.com |
36ca3c04dd363c14bcc23be5a628b5e7e6829b54 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4131/codes/1593_1801.py | 68aa91038cf015d360afa193c96d8336c1eea4bb | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | x1= input("nome")
x2= int(input(" nrf "))
print(("Abra " + x1 + " ") * x2) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
d25711699363e9206514c4e7ad370e6cdfa6d6cc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_049/ch10_2019_02_25_13_33_57_478412.py | 82ad6cf6556fc264702c4e9a127a1e2a0506b9ad | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | def libras_para_kg(x):
y=x*0.453592
return y | [
"you@example.com"
] | you@example.com |
584d4619db06c8d1462cb07e7215ad04c548557e | 31681488e69da3c7e00b0eda28e5cb720ef2299c | /liteiclink/serwb/packet.py | e8fc035b1b096ded56e1cea8560fc1819ccb2679 | [
"BSD-2-Clause"
] | permissive | zsipos/liteiclink | 4e9bdf6a819f490461cb33d0837247041203071d | 864cd831f3475dffd1c92d6d4a1b86608680bcf2 | refs/heads/master | 2021-07-08T07:43:10.897604 | 2020-01-28T09:40:17 | 2020-01-28T09:40:17 | 245,119,569 | 0 | 0 | NOASSERTION | 2020-03-05T09:25:16 | 2020-03-05T09:25:15 | null | UTF-8 | Python | false | false | 4,839 | py | # This file is Copyright (c) 2017-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
from math import ceil
from migen import *
from migen.genlib.misc import WaitTimer
from litex.gen import *
from litex.soc.interconnect import stream
class HeaderField:
    """Location of one header field: containing byte, bit offset, bit width."""

    def __init__(self, byte, offset, width):
        # Plain record: store the three coordinates of the field.
        self.byte, self.offset, self.width = byte, offset, width
class Header:
    """Describes a packed header as a dict of name -> HeaderField.

    If swap_field_bytes is True, field values are byte-reversed when
    packed into / unpacked from the header signal (endianness swap).
    """

    def __init__(self, fields, length, swap_field_bytes=True):
        # fields: {name: HeaderField}; length: header length in bytes.
        self.fields = fields
        self.length = length
        self.swap_field_bytes = swap_field_bytes

    def get_layout(self):
        """Return a stream layout [(name, width), ...] sorted by field name."""
        layout = []
        for k, v in sorted(self.fields.items()):
            layout.append((k, v.width))
        return layout

    def get_field(self, obj, name, width):
        """Return the signal slice on obj corresponding to a header field.

        Names ending in "_lsb"/"_msb" select the low/high half of a
        double-width attribute; otherwise the attribute is used whole.
        Raises ValueError if the slice width does not match the field width.
        """
        if "_lsb" in name:
            field = getattr(obj, name.replace("_lsb", ""))[:width]
        elif "_msb" in name:
            field = getattr(obj, name.replace("_msb", ""))[width:2*width]
        else:
            field = getattr(obj, name)
        if len(field) != width:
            raise ValueError("Width mismatch on " + name + " field")
        return field

    def encode(self, obj, signal):
        """Return statements packing obj's fields into the header signal."""
        r = []
        for k, v in sorted(self.fields.items()):
            # Bit span of this field inside the flat header signal.
            start = v.byte*8 + v.offset
            end = start + v.width
            field = self.get_field(obj, k, v.width)
            if self.swap_field_bytes:
                field = reverse_bytes(field)
            r.append(signal[start:end].eq(field))
        return r

    def decode(self, signal, obj):
        """Return statements unpacking the header signal into obj's fields."""
        r = []
        for k, v in sorted(self.fields.items()):
            start = v.byte*8 + v.offset
            end = start + v.width
            field = self.get_field(obj, k, v.width)
            if self.swap_field_bytes:
                r.append(field.eq(reverse_bytes(signal[start:end])))
            else:
                r.append(field.eq(signal[start:end]))
        return r
def phy_description(dw):
    """Stream layout with a single ``data`` field of ``dw`` bits (PHY side)."""
    return stream.EndpointDescription([("data", dw)])
def user_description(dw):
    """Stream layout carrying 32-bit ``data`` plus a 32-bit packet ``length``.

    Note: ``dw`` is accepted for symmetry with phy_description but the
    field widths are fixed at 32 bits.
    """
    return stream.EndpointDescription([
        ("data", 32),
        ("length", 32),
    ])
class Packetizer(Module):
    """Frame a 32-bit user stream into packets for the PHY.

    Emitted packet (one 32-bit word each):
      - preamble : constant 0x5aa55aa5
      - length   : taken from sink.length
      - payload  : sink.data words, terminated by sink.last
    """

    def __init__(self):
        self.sink = sink = stream.Endpoint(user_description(32))
        self.source = source = stream.Endpoint(phy_description(32))

        # # #

        # Packet description
        # - preamble : 4 bytes
        # - length : 4 bytes
        # - payload
        fsm = FSM(reset_state="PREAMBLE")
        self.submodules += fsm
        fsm.act("PREAMBLE",
            # Wait for incoming data, then emit the fixed preamble word.
            If(sink.valid,
                source.valid.eq(1),
                source.data.eq(0x5aa55aa5),
                If(source.ready,
                    NextState("LENGTH")
                )
            )
        )
        fsm.act("LENGTH",
            # Emit the packet length word.
            source.valid.eq(1),
            source.data.eq(sink.length),
            If(source.ready,
                NextState("DATA")
            )
        )
        fsm.act("DATA",
            # Pass payload words through until the last one is accepted.
            source.valid.eq(sink.valid),
            source.data.eq(sink.data),
            sink.ready.eq(source.ready),
            If(source.ready & sink.last,
                NextState("PREAMBLE")
            )
        )
class Depacketizer(Module):
    """Recover user packets from a 32-bit PHY stream.

    Hunts for the 0x5aa55aa5 preamble word, latches the following length
    word, then forwards length/4 payload words.  If a packet stalls for
    ``timeout`` seconds (measured with clk_freq), the FSM resets back to
    preamble hunting.
    """

    def __init__(self, clk_freq, timeout=10):
        self.sink = sink = stream.Endpoint(phy_description(32))
        self.source = source = stream.Endpoint(user_description(32))

        # # #

        # Payload word counter and latched packet length.
        count = Signal(len(source.length))
        length = Signal(len(source.length))

        # Packet description
        # - preamble : 4 bytes
        # - length : 4 bytes
        # - payload
        fsm = FSM(reset_state="PREAMBLE")
        self.submodules += fsm

        timer = WaitTimer(clk_freq*timeout)
        self.submodules += timer

        fsm.act("PREAMBLE",
            # Discard words until the preamble is seen.
            sink.ready.eq(1),
            If(sink.valid &
               (sink.data == 0x5aa55aa5),
                NextState("LENGTH")
            )
        )
        fsm.act("LENGTH",
            # Latch the length word and reset the payload counter.
            sink.ready.eq(1),
            If(sink.valid,
                NextValue(count, 0),
                NextValue(length, sink.data),
                NextState("DATA")
            ),
            timer.wait.eq(1)
        )
        fsm.act("DATA",
            source.valid.eq(sink.valid),
            # length is in bytes; length[2:] is the payload word count.
            source.last.eq(count == (length[2:] - 1)),
            source.length.eq(length),
            source.data.eq(sink.data),
            sink.ready.eq(source.ready),
            If(timer.done,
                # Stalled packet: give up and resynchronize on the preamble.
                NextState("PREAMBLE")
            ).Elif(source.valid & source.ready,
                NextValue(count, count + 1),
                If(source.last,
                    NextState("PREAMBLE")
                )
            ),
            timer.wait.eq(1)
        )
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
9d848e49927a607219c022094d9a79b41a25384e | ad38b9a924911b3249b9ffec01d78a2b1048fa0d | /动态调试/Immunity Debugger v1.73/Lib/subprocess.py | 975c43390b417f3aeb6e2350ef8d73efb7627874 | [] | no_license | h3len/HackerToolBox | 77c5a45553784d20104db21ac5fe8f840ca519a6 | 4397b0c25cfd0eb3f92484f396745cc664af2531 | refs/heads/master | 2020-04-04T22:57:47.376773 | 2018-10-10T15:43:06 | 2018-10-10T15:50:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,570 | py | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the childs point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() will raise CalledProcessError, if the called process
returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional stdin argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
Replacing popen2.*
------------------
Note: If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen3.Popen4 basically works as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
# Exception classes used by this module.
class CalledProcessError(Exception):
    """Raised by check_call() when a process exits with a non-zero status.

    The failing command is stored in ``cmd`` and its exit status in
    ``returncode``.
    """

    def __init__(self, returncode, cmd):
        self.returncode = returncode
        self.cmd = cmd

    def __str__(self):
        return "Command '%s' returned non-zero exit status %d" % (
            self.cmd, self.returncode)
# Platform-specific imports: on Windows use either pywin32 (flip the "if 0")
# or the bundled _subprocess driver with minimal shim classes; on POSIX use
# select/fcntl based pipe handling.
if mswindows:
    import threading
    import msvcrt
    if 0: # <-- change this to use pywin32 instead of the _subprocess driver
        import pywintypes
        from win32api import GetStdHandle, STD_INPUT_HANDLE, \
             STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
        from win32api import GetCurrentProcess, DuplicateHandle, \
             GetModuleFileName, GetVersion
        from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
        from win32pipe import CreatePipe
        from win32process import CreateProcess, STARTUPINFO, \
                                 GetExitCodeProcess, STARTF_USESTDHANDLES, \
                                 STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
        from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
    else:
        from _subprocess import *
        # Minimal stand-ins for the pywin32 types used above.
        class STARTUPINFO:
            dwFlags = 0
            hStdInput = None
            hStdOutput = None
            hStdError = None
            wShowWindow = 0
        class pywintypes:
            error = IOError
else:
    import select
    import errno
    import fcntl
    import pickle

# Public API of this module.
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
try:
    # Highest file descriptor number allowed by the platform; used when
    # closing inherited fds in the child process.
    MAXFD = os.sysconf("SC_OPEN_MAX")
except:
    MAXFD = 256

# True/False does not exist on 2.2.0
try:
    False
except NameError:
    False = 0
    True = 1

# Popen instances whose owner went away before the child terminated;
# polled by _cleanup() so finished children get reaped.
_active = []
def _cleanup():
    """Poll leftover Popen instances and drop finished ones from _active."""
    for inst in _active[:]:
        # _deadstate makes poll() return sys.maxint for a child that is
        # gone, so finished processes satisfy the >= 0 test and are removed.
        # NOTE(review): on POSIX a child killed by a signal gets a negative
        # returncode, which fails this >= 0 test and would stay in _active
        # -- confirm whether that is intended.
        if inst.poll(_deadstate=sys.maxint) >= 0:
            try:
                _active.remove(inst)
            except ValueError:
                # This can happen if two threads create a new Popen instance.
                # It's harmless that it was already removed, so ignore.
                pass
# Special values for the stdin/stdout/stderr arguments:
PIPE = -1     # create a new pipe to the child
STDOUT = -2   # send child stderr to the same handle as its stdout
def call(*popenargs, **kwargs):
    """Run a command and block until it finishes.

    All arguments are forwarded unchanged to the Popen constructor.
    Returns the child's returncode attribute.  Example:

    retcode = call(["ls", "-l"])
    """
    process = Popen(*popenargs, **kwargs)
    return process.wait()
def check_call(*popenargs, **kwargs):
    """Run command with arguments and wait for it to complete.

    Returns the returncode when it is zero; otherwise raises
    CalledProcessError carrying the non-zero returncode and the command.
    The arguments are the same as for the Popen constructor.  Example:

    check_call(["ls", "-l"])
    """
    status = call(*popenargs, **kwargs)
    # The command is either the "args" keyword or the first positional arg.
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    if status != 0:
        raise CalledProcessError(status, cmd)
    return status
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """

    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    result = []
    needquote = False
    for arg in seq:
        bs_buf = []

        # Add a space to separate this argument from the others
        if result:
            result.append(' ')

        # Bugfix: an empty argument must be quoted as "" -- without the
        # "not arg" test it silently disappeared from the command line.
        needquote = (" " in arg) or ("\t" in arg) or not arg
        if needquote:
            result.append('"')

        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double the preceding backslashes, then escape the quote.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)

        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)

        if needquote:
            # Trailing backslashes must be doubled before the closing quote.
            result.extend(bs_buf)
            result.append('"')

    return ''.join(result)
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds:
raise ValueError("close_fds is not supported on Windows "
"platforms")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if p2cwrite:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self.poll(_deadstate=sys.maxint)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
self.stdin.write(input)
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
elif self.stderr:
stderr = self.stderr.read()
self.wait()
return (stdout, stderr)
return self._communicate(input)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = GetStdHandle(STD_INPUT_HANDLE)
elif stdin == PIPE:
p2cread, p2cwrite = CreatePipe(None, 0)
# Detach and turn into fd
p2cwrite = p2cwrite.Detach()
p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
elif stdout == PIPE:
c2pread, c2pwrite = CreatePipe(None, 0)
# Detach and turn into fd
c2pread = c2pread.Detach()
c2pread = msvcrt.open_osfhandle(c2pread, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = GetStdHandle(STD_ERROR_HANDLE)
elif stderr == PIPE:
errread, errwrite = CreatePipe(None, 0)
# Detach and turn into fd
errread = errread.Detach()
errread = msvcrt.open_osfhandle(errread, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
    """Return a duplicate of handle, which is inheritable"""
    # bInheritHandle=1 makes the duplicate inheritable by the spawned
    # child; the original handle is left unchanged.
    return DuplicateHandle(GetCurrentProcess(), handle,
                           GetCurrentProcess(), 0, 1,
                           DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
    """Find and return absolute path to w9xpopen.exe"""
    # First look next to the interpreter executable itself.
    w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                            "w9xpopen.exe")
    if not os.path.exists(w9xpopen):
        # Eeek - file-not-found - possibly an embedding
        # situation - see if we can locate it in sys.exec_prefix
        w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                "w9xpopen.exe")
        if not os.path.exists(w9xpopen):
            raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                               "needed for Popen to work with your "
                               "shell or platform.")
    return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
                   cwd, env, universal_newlines,
                   startupinfo, creationflags, shell,
                   p2cread, p2cwrite,
                   c2pread, c2pwrite,
                   errread, errwrite):
    """Execute program (MS Windows version)

    NOTE: this is Python 2 code (long literal 0x80000000L and
    'except cls, e' syntax below).
    """
    # CreateProcess takes one command-line string, not an argv list.
    if not isinstance(args, types.StringTypes):
        args = list2cmdline(args)
    # Process startup details
    if startupinfo is None:
        startupinfo = STARTUPINFO()
    if None not in (p2cread, c2pwrite, errwrite):
        # At least one std stream is redirected: pass all three handles.
        startupinfo.dwFlags |= STARTF_USESTDHANDLES
        startupinfo.hStdInput = p2cread
        startupinfo.hStdOutput = c2pwrite
        startupinfo.hStdError = errwrite
    if shell:
        startupinfo.dwFlags |= STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = SW_HIDE
        comspec = os.environ.get("COMSPEC", "cmd.exe")
        args = comspec + " /c " + args
        if (GetVersion() >= 0x80000000L or
            os.path.basename(comspec).lower() == "command.com"):
            # Win9x, or using command.com on NT. We need to
            # use the w9xpopen intermediate program. For more
            # information, see KB Q150956
            # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
            w9xpopen = self._find_w9xpopen()
            args = '"%s" %s' % (w9xpopen, args)
            # Not passing CREATE_NEW_CONSOLE has been known to
            # cause random failures on win9x. Specifically a
            # dialog: "Your program accessed mem currently in
            # use at xxx" and a hopeful warning about the
            # stability of your system. Cost is Ctrl+C wont
            # kill children.
            creationflags |= CREATE_NEW_CONSOLE
    # Start the process
    try:
        hp, ht, pid, tid = CreateProcess(executable, args,
                                         # no special security
                                         None, None,
                                         # must inherit handles to pass std
                                         # handles
                                         1,
                                         creationflags,
                                         env,
                                         cwd,
                                         startupinfo)
    except pywintypes.error, e:
        # Translate pywintypes.error to WindowsError, which is
        # a subclass of OSError. FIXME: We should really
        # translate errno using _sys_errlist (or simliar), but
        # how can this be done from Python?
        raise WindowsError(*e.args)
    # Retain the process handle, but close the thread handle
    self._child_created = True
    self._handle = hp
    self.pid = pid
    ht.Close()
    # Child is launched. Close the parent's copy of those pipe
    # handles that only the child should have open. You need
    # to make sure that no handles to the write end of the
    # output pipe are maintained in this process or else the
    # pipe will not close when the child process exits and the
    # ReadFile will hang.
    if p2cread is not None:
        p2cread.Close()
    if c2pwrite is not None:
        c2pwrite.Close()
    if errwrite is not None:
        errwrite.Close()
def poll(self, _deadstate=None):
    """Check if child process has terminated. Returns returncode
    attribute.

    _deadstate is accepted for signature parity with the POSIX
    implementation; it is unused here.
    """
    if self.returncode is None:
        # Zero timeout: a non-blocking probe of the process handle.
        if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
            self.returncode = GetExitCodeProcess(self._handle)
    return self.returncode
def wait(self):
    """Wait for child process to terminate. Returns returncode
    attribute."""
    if self.returncode is None:
        # INFINITE: block until the process signals; return value unused.
        obj = WaitForSingleObject(self._handle, INFINITE)
        self.returncode = GetExitCodeProcess(self._handle)
    return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
    """Windows implementation of communicate(): feed *input* to stdin
    while draining stdout/stderr on daemon reader threads (threads avoid
    the deadlock of the child blocking on a full pipe)."""
    stdout = None # Return
    stderr = None # Return
    if self.stdout:
        stdout = []
        stdout_thread = threading.Thread(target=self._readerthread,
                                         args=(self.stdout, stdout))
        stdout_thread.setDaemon(True)
        stdout_thread.start()
    if self.stderr:
        stderr = []
        stderr_thread = threading.Thread(target=self._readerthread,
                                         args=(self.stderr, stderr))
        stderr_thread.setDaemon(True)
        stderr_thread.start()
    if self.stdin:
        if input is not None:
            self.stdin.write(input)
        # Closing stdin signals EOF to the child.
        self.stdin.close()
    if self.stdout:
        stdout_thread.join()
    if self.stderr:
        stderr_thread.join()
    # All data exchanged. Translate lists into strings.
    # (_readerthread appended exactly one element per stream.)
    if stdout is not None:
        stdout = stdout[0]
    if stderr is not None:
        stderr = stderr[0]
    # Translate newlines, if requested. We cannot let the file
    # object do the translation: It is based on stdio, which is
    # impossible to combine with select (unless forcing no
    # buffering).
    if self.universal_newlines and hasattr(file, 'newlines'):
        if stdout:
            stdout = self._translate_newlines(stdout)
        if stderr:
            stderr = self._translate_newlines(stderr)
    self.wait()
    return (stdout, stderr)
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
    """Construct and return tuple with IO objects:
    p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite

    POSIX version: each of stdin/stdout/stderr may be None (inherit),
    PIPE (create an os.pipe pair), an integer fd, or a file-like object
    with a fileno().  Unused slots stay None.
    """
    p2cread, p2cwrite = None, None
    c2pread, c2pwrite = None, None
    errread, errwrite = None, None
    if stdin is None:
        pass
    elif stdin == PIPE:
        p2cread, p2cwrite = os.pipe()
    elif isinstance(stdin, int):
        p2cread = stdin
    else:
        # Assuming file-like object
        p2cread = stdin.fileno()
    if stdout is None:
        pass
    elif stdout == PIPE:
        c2pread, c2pwrite = os.pipe()
    elif isinstance(stdout, int):
        c2pwrite = stdout
    else:
        # Assuming file-like object
        c2pwrite = stdout.fileno()
    if stderr is None:
        pass
    elif stderr == PIPE:
        errread, errwrite = os.pipe()
    elif stderr == STDOUT:
        # Merge stderr into the stdout target.
        errwrite = c2pwrite
    elif isinstance(stderr, int):
        errwrite = stderr
    else:
        # Assuming file-like object
        errwrite = stderr.fileno()
    return (p2cread, p2cwrite,
            c2pread, c2pwrite,
            errread, errwrite)
def _set_cloexec_flag(self, fd):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
def _close_fds(self, but):
    """Close every fd from 3 up to MAXFD except *but* (the error pipe),
    so the exec'd child does not inherit stray descriptors.  Failures
    (fd not open) are deliberately ignored.  Python 2 code (xrange)."""
    for i in xrange(3, MAXFD):
        if i == but:
            continue
        try:
            os.close(i)
        except:
            pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
                   cwd, env, universal_newlines,
                   startupinfo, creationflags, shell,
                   p2cread, p2cwrite,
                   c2pread, c2pwrite,
                   errread, errwrite):
    """Execute program (POSIX version)

    fork()s, wires up the child's std fds, then exec()s.  An exec
    failure in the child is pickled through a CLOEXEC pipe back to the
    parent and re-raised there.  Python 2 code (apply, str pipe data).
    """
    if isinstance(args, types.StringTypes):
        args = [args]
    if shell:
        args = ["/bin/sh", "-c"] + args
    if executable is None:
        executable = args[0]
    # For transferring possible exec failure from child to parent
    # The first char specifies the exception type: 0 means
    # OSError, 1 means some other error.
    errpipe_read, errpipe_write = os.pipe()
    # CLOEXEC on the write end: a successful exec closes it, which the
    # parent observes as EOF (empty read) below.
    self._set_cloexec_flag(errpipe_write)
    self.pid = os.fork()
    self._child_created = True
    if self.pid == 0:
        # Child
        try:
            # Close parent's pipe ends
            # NOTE(review): truthiness tests like `if p2cwrite:` skip
            # fd 0 -- a known quirk of this early implementation.
            if p2cwrite:
                os.close(p2cwrite)
            if c2pread:
                os.close(c2pread)
            if errread:
                os.close(errread)
            os.close(errpipe_read)
            # Dup fds for child
            if p2cread:
                os.dup2(p2cread, 0)
            if c2pwrite:
                os.dup2(c2pwrite, 1)
            if errwrite:
                os.dup2(errwrite, 2)
            # Close pipe fds. Make sure we don't close the same
            # fd more than once, or standard fds.
            for fd in set((p2cread, c2pwrite, errwrite))-set((0,1,2)):
                if fd: os.close(fd)
            # Close all other fds, if asked for
            if close_fds:
                self._close_fds(but=errpipe_write)
            if cwd is not None:
                os.chdir(cwd)
            if preexec_fn:
                apply(preexec_fn)
            if env is None:
                os.execvp(executable, args)
            else:
                os.execvpe(executable, args, env)
        except:
            exc_type, exc_value, tb = sys.exc_info()
            # Save the traceback and attach it to the exception object
            exc_lines = traceback.format_exception(exc_type,
                                                   exc_value,
                                                   tb)
            exc_value.child_traceback = ''.join(exc_lines)
            os.write(errpipe_write, pickle.dumps(exc_value))
            # This exitcode won't be reported to applications, so it
            # really doesn't matter what we return.
            os._exit(255)
    # Parent
    os.close(errpipe_write)
    # Close the child-side ends of any pipes we created.
    if p2cread and p2cwrite:
        os.close(p2cread)
    if c2pwrite and c2pread:
        os.close(c2pwrite)
    if errwrite and errread:
        os.close(errwrite)
    # Wait for exec to fail or succeed; possibly raising exception
    data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
    os.close(errpipe_read)
    if data != "":
        # Child failed before exec: reap it and re-raise its exception.
        os.waitpid(self.pid, 0)
        child_exception = pickle.loads(data)
        raise child_exception
def _handle_exitstatus(self, sts):
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
elif os.WIFEXITED(sts):
self.returncode = os.WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def poll(self, _deadstate=None):
    """Check if child process has terminated. Returns returncode
    attribute.

    _deadstate: returncode to record if waitpid fails (e.g. during
    interpreter shutdown when the child was already reaped).
    """
    if self.returncode is None:
        try:
            # WNOHANG makes this a non-blocking probe; pid == 0 means
            # the child is still running.
            pid, sts = os.waitpid(self.pid, os.WNOHANG)
            if pid == self.pid:
                self._handle_exitstatus(sts)
        except os.error:
            if _deadstate is not None:
                self.returncode = _deadstate
    return self.returncode
def wait(self):
    """Block until the child terminates; return the returncode attribute."""
    if self.returncode is not None:
        # Already reaped by an earlier wait()/poll().
        return self.returncode
    _, status = os.waitpid(self.pid, 0)
    self._handle_exitstatus(status)
    return self.returncode
def _communicate(self, input):
    """POSIX implementation of communicate(): multiplex writing *input*
    to stdin and draining stdout/stderr with select(), avoiding pipe
    deadlocks, then wait() for the child."""
    read_set = []
    write_set = []
    stdout = None # Return
    stderr = None # Return
    if self.stdin:
        # Flush stdio buffer. This might block, if the user has
        # been writing to .stdin in an uncontrolled fashion.
        self.stdin.flush()
        if input:
            write_set.append(self.stdin)
        else:
            # No input: close immediately so the child sees EOF.
            self.stdin.close()
    if self.stdout:
        read_set.append(self.stdout)
        stdout = []
    if self.stderr:
        read_set.append(self.stderr)
        stderr = []
    while read_set or write_set:
        rlist, wlist, xlist = select.select(read_set, write_set, [])
        if self.stdin in wlist:
            # When select has indicated that the file is writable,
            # we can write up to PIPE_BUF bytes without risk
            # blocking. POSIX defines PIPE_BUF >= 512
            bytes_written = os.write(self.stdin.fileno(), input[:512])
            input = input[bytes_written:]
            if not input:
                self.stdin.close()
                write_set.remove(self.stdin)
        if self.stdout in rlist:
            data = os.read(self.stdout.fileno(), 1024)
            if data == "":
                # EOF on stdout: stop selecting on it.
                self.stdout.close()
                read_set.remove(self.stdout)
            stdout.append(data)
        if self.stderr in rlist:
            data = os.read(self.stderr.fileno(), 1024)
            if data == "":
                self.stderr.close()
                read_set.remove(self.stderr)
            stderr.append(data)
    # All data exchanged. Translate lists into strings.
    if stdout is not None:
        stdout = ''.join(stdout)
    if stderr is not None:
        stderr = ''.join(stderr)
    # Translate newlines, if requested. We cannot let the file
    # object do the translation: It is based on stdio, which is
    # impossible to combine with select (unless forcing no
    # buffering).
    if self.universal_newlines and hasattr(file, 'newlines'):
        if stdout:
            stdout = self._translate_newlines(stdout)
        if stderr:
            stderr = self._translate_newlines(stderr)
    self.wait()
    return (stdout, stderr)
def _demo_posix():
    """Demonstrate Popen usage on POSIX (Python 2 print statements)."""
    #
    # Example 1: Simple redirection: Get process list
    #
    plist = Popen(["ps"], stdout=PIPE).communicate()[0]
    print "Process list:"
    print plist
    #
    # Example 2: Change uid before executing child
    #
    if os.getuid() == 0:
        p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
        p.wait()
    #
    # Example 3: Connecting several subprocesses
    #
    print "Looking for 'hda'..."
    p1 = Popen(["dmesg"], stdout=PIPE)
    p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])
    #
    # Example 4: Catch execution error
    #
    print
    print "Trying a weird file..."
    try:
        print Popen(["/this/path/does/not/exist"]).communicate()
    except OSError, e:
        if e.errno == errno.ENOENT:
            print "The file didn't exist. I thought so..."
            print "Child traceback:"
            print e.child_traceback
        else:
            print "Error", e.errno
    else:
        print >>sys.stderr, "Gosh. No error."
def _demo_windows():
    """Demonstrate Popen usage on Windows (Python 2 print statements)."""
    #
    # Example 1: Connecting several subprocesses
    #
    print "Looking for 'PROMPT' in set output..."
    p1 = Popen("set", stdout=PIPE, shell=True)
    p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])
    #
    # Example 2: Simple execution of program
    #
    print "Executing calc..."
    p = Popen("calc")
    p.wait()
if __name__ == "__main__":
    # When run as a script, exercise the platform-appropriate demo.
    if mswindows:
        _demo_windows()
    else:
        _demo_posix()
| [
"redleavessun@gmail.com"
] | redleavessun@gmail.com |
d2d2f55bf58acf2c7b0638ee9c3f974eddcc7f15 | 1f41b828fb652795482cdeaac1a877e2f19c252a | /maya_tools_backup/chRig/python/chModules/jointBasePsd/ui/part1_driverInfo.py | f84898d94a6be94c7c1a4dcd28d3858e67aa209f | [] | no_license | jonntd/mayadev-1 | e315efe582ea433dcf18d7f1e900920f5590b293 | f76aeecb592df766d05a4e10fa2c2496f0310ca4 | refs/heads/master | 2021-05-02T07:16:17.941007 | 2018-02-05T03:55:12 | 2018-02-05T03:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,678 | py | import maya.cmds as cmds
import uifunctions as uifnc
import globalInfo
import math
from functools import partial
class MovedDriverList:
    """UI helper that lists angleDriver nodes and their three driver
    angles, and lets the user push a driver/angle pair into a target
    textScrollList via popup menus."""

    def __init__(self, width, targetUI, minValue=0.1 ):
        # width: available pixel width; targetUI: textScrollList that
        # receives "driver | angleN : value" entries; minValue: angle
        # magnitude below which a field is shown as inactive.
        self._width = width-25
        self._minValue = minValue
        self._updateTargetUi = targetUI

    def driverScrollAddPopupCmd(self, *args ):
        # (Re)build the right-click popup on the target scroll list.
        try: cmds.deleteUI( self.popupUi, menu=1 )
        except: pass
        self.popupUi = cmds.popupMenu( p=self._updateTargetUi )
        def removeSelCmd( *args ):
            # Remove only the currently selected entry.
            si = cmds.textScrollList( self._updateTargetUi, q=1, si=1 )
            cmds.textScrollList( self._updateTargetUi, e=1, ri=si )
        def removeAllCmd( *args ):
            cmds.textScrollList( self._updateTargetUi, e=1, ra=1 )
        #cmds.deleteUI( self.popupUi, menu=1 )
        cmds.menuItem( l='Remove All', c=removeAllCmd )

    def addConnectDriver(self, str1, *args ):
        # str1 looks like "driverName | angleN : value"; replace any
        # existing entry for the same driver, then append the new one.
        driverName = str1.split( ':' )[0]
        strList = cmds.textScrollList( self._updateTargetUi, q=1, ai=1 )
        if not strList: strList = []
        for strTarget in strList:
            targetDriverName = strTarget.split( ':' )[0]
            if driverName == targetDriverName:
                cmds.textScrollList( self._updateTargetUi, e=1, ri=strTarget )
        cmds.textScrollList( self._updateTargetUi, e=1, a=str1 )

    def add(self, driverName, angleValues=[] ):
        # Add one row (label + three float fields) for *driverName*.
        # NOTE(review): mutable default argument; safe only because a
        # falsy value is immediately replaced below.
        if not angleValues:
            angleValues = [0,0,0]
        defaultBgc = [ .1, .1, .1 ]
        onBgc = [ .9, .9, .2 ]
        enList = [0,0,0]
        bgcList = [None,None,None]
        for i in range( 3 ):
            # Highlight angles whose magnitude reaches the threshold.
            if math.fabs( angleValues[i] ) >= self._minValue:
                bgcList[i] = onBgc
                enList[i] = 1
            else:
                bgcList[i] = defaultBgc
                enList[i] = 0
        widthList = uifnc.setWidthByPerList( [70,15,15,15] , self._width )
        cmds.rowColumnLayout( nc=4, cw=[(1,widthList[0]),(2,widthList[1]),(3,widthList[2]),(4,widthList[3])] )
        cmds.text( l= driverName+' : ', al='right' )
        cmds.floatField( precision=2, v=angleValues[0], bgc= bgcList[0] )
        cmds.popupMenu(); cmds.menuItem( l='Add Driver', c= partial( self.addConnectDriver, driverName+' | angle0 : %3.2f' %angleValues[0] ) )
        cmds.floatField( precision=2, v=angleValues[1], bgc= bgcList[1] )
        cmds.popupMenu(); cmds.menuItem( l='Add Driver', c= partial( self.addConnectDriver, driverName+' | angle1 : %3.2f' %angleValues[1] ) )
        cmds.floatField( precision=2, v=angleValues[2], bgc= bgcList[2] )
        cmds.popupMenu(); cmds.menuItem( l='Add Driver', c= partial( self.addConnectDriver, driverName+' | angle2 : %3.2f' %angleValues[2] ) )
        self.driverScrollAddPopupCmd()
        cmds.setParent( '..' )
class Cmd:
    """Command layer for the driver-info UI: scans the rig for
    angleDriver nodes and refreshes the list widgets."""

    def __init__(self, width ):
        # Register this instance globally so other modules can reach it.
        globalInfo.driverInfoInst = self

    def updateCmd( self, *args ):
        # Rebuild the driver rows under the scroll layout from the
        # angleDriver nodes found below globalInfo.rootDriver.
        rootName = globalInfo.rootDriver
        minValue = 0.1
        movedDriverCheck = cmds.checkBox( self._movedDriverCheck, q=1, v=1 )
        children = cmds.listRelatives( rootName, c=1, ad=1, f=1 )
        angleDriverList = []
        for child in children:
            hists = cmds.listHistory( child )
            for hist in hists:
                if cmds.nodeType( hist ) == 'angleDriver':
                    if not hist in angleDriverList:
                        angleDriverList.append( hist )
        showDrivers = []
        for driver in angleDriverList:
            if movedDriverCheck:
                # Only keep drivers with at least one angle above the
                # threshold when "Show Only Moved Drivers" is on.
                angle1, angle2, angle3 = cmds.getAttr( driver+'.outDriver' )[0]
                if math.fabs( angle1 ) > minValue or math.fabs( angle2 ) > minValue or math.fabs( angle3 ) > minValue:
                    showDrivers.append( driver )
            else:
                showDrivers.append( driver )
        # Clear previously built rows before re-adding.
        childUis = cmds.scrollLayout( self._driverListLay, q=1, ca=1 )
        if childUis:
            for childUi in childUis:
                cmds.deleteUI( childUi )
        cmds.setParent( self._driverListLay )
        for driver in showDrivers:
            values = cmds.getAttr( driver+'.outDriver' )[0]
            self._movedDriverInst.add( driver, values )
        self._movedDrivers = showDrivers
        self.reWriteValueCmd()

    def reWriteValueCmd( self ):
        # Refresh the "driver | angleN : value" entries with current
        # attribute values; drop entries whose angle fell to <= 0.1.
        items = cmds.textScrollList( self._driverScrollList, q=1, ai=1 )
        if not items: items = []
        for item in items:
            driverName, other = item.split( ' | angle' )
            angleIndex, angleValue = other.split( ' : ' )
            angleValue = cmds.getAttr( driverName+'.outDriver%s' % angleIndex )
            reItem = driverName+' | angle'+angleIndex+' : %3.2f' % angleValue
            cmds.textScrollList( self._driverScrollList, e=1, ri=item )
            if angleValue > 0.1:
                cmds.textScrollList( self._driverScrollList, e=1, a=reItem )
class Add( Cmd ):
    """Builds the driver-info panel layout (driver list, options row and
    the connect-driver scroll list) on top of the Cmd behaviors."""

    def __init__(self, width ):
        # Derive inner widths from the requested panel width.
        self._emptyWidth = 10
        self._width = width - self._emptyWidth*2 - 4
        self._height = 140
        sepList = [ 65, 50 ]
        self._mainWidthList = uifnc.setWidthByPerList( sepList, self._width )
        sepList = [ 70, 30 ]
        self._optionWidthList = uifnc.setWidthByPerList( sepList, self._mainWidthList[0]-20 )
        Cmd.__init__( self, self._mainWidthList[0] )
        self._rowColumns = []
        self.core()

    def core(self):
        # Title row.
        column1 = cmds.rowColumnLayout( nc= 3, cw=[(1,self._emptyWidth),
                                                   (2,self._width),
                                                   (3,self._emptyWidth)])
        uifnc.setSpace()
        cmds.text( l='Driver LIST' )
        uifnc.setSpace()
        cmds.setParent( '..' )
        uifnc.setSpace( 5 )
        # Main row: driver rows on the left, connect list on the right.
        column2 = cmds.rowColumnLayout( nc=4, cw=[(1,self._emptyWidth),
                                                  (2,self._mainWidthList[0]),
                                                  (3,self._mainWidthList[1]),
                                                  (4,self._emptyWidth) ] )
        uifnc.setSpace()
        column3 = cmds.rowColumnLayout( nc=1, cw=[(1,self._mainWidthList[0])])
        self._driverListLay = cmds.scrollLayout( h=self._height-30 )
        cmds.setParent( '..' )
        uifnc.setSpace( 5 )
        # Options row: moved-only checkbox and refresh button.
        column4 = cmds.rowColumnLayout( nc= 4, cw=[(1,self._emptyWidth),
                                                   (2,self._optionWidthList[0]),
                                                   (3,self._optionWidthList[1]),
                                                   (4,self._emptyWidth)] )
        uifnc.setSpace()
        self._movedDriverCheck = cmds.checkBox( l='Show Only Moved Drivers', cc= self.updateCmd )
        cmds.button( l='Refresh', c= self.updateCmd )
        uifnc.setSpace()
        cmds.setParent( '..' )
        cmds.setParent( '..' )
        self._driverScrollList = cmds.textScrollList( h= self._height )
        self._movedDriverInst = MovedDriverList( self._mainWidthList[0], self._driverScrollList )
        uifnc.setSpace()
        cmds.setParent( '..' )
        self._rowColumns = [ column1, column2, column3, column4 ]
"kimsung9k@naver.com"
] | kimsung9k@naver.com |
532ef36c34decb44e73a5e1b81beb7a67c57cc0a | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L83/83-79_MD_NVT_rerun/set_1.py | e52dbdc3dbc2b5c4afd43f06c240d855b04ecbb2 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import os
# Per-lambda setup for one-step TI production runs: for every lambda
# window, enter its directory, copy the production-input and PBS
# templates in, and substitute the placeholder XXX with the window's
# lambda value.
dir = '/mnt/scratch/songlin3/run/ptp1b/L83/MD_NVT_rerun/ti_one-step/83_79/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_1.in'
temp_pbs = filesdir + 'temp_1.pbs'

lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]

for lam in lambd:
    tag = "%6.5f" % lam          # directory / file-name form of lambda
    os.chdir(tag)
    workdir = dir + tag + '/'
    # production input: copy template, then patch lambda into place
    prodin = workdir + tag + "_prod_1.in"
    os.system("cp %s %s" % (temp_prodin, prodin))
    os.system("sed -i 's/XXX/%s/g' %s" % (tag, prodin))
    # PBS script: same copy-and-patch treatment
    pbs = workdir + tag + "_1.pbs"
    os.system("cp %s %s" % (temp_pbs, pbs))
    os.system("sed -i 's/XXX/%s/g' %s" % (tag, pbs))
    # submit pbs (disabled)
    #os.system("qsub %s" % (pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
29fe4042cd2cbd2f2ca9d31a58cf53afd5ba5298 | 4368c51ce45504e2cc17ea8772eeb94c13e1c34a | /utils/meta_utils.py | 2c8068279495081ff5d67c14a8d980c40f3f982b | [] | no_license | Shuai-Xie/metaASM | 1eddc02846ee3fc05198883277357f9735dbaeb0 | c6a7b8fe3ecbca2bdc874e3b0dad6dd8f8c1c4cd | refs/heads/master | 2021-03-18T17:57:12.952618 | 2020-04-03T14:20:12 | 2020-04-03T14:20:12 | 247,087,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | import torch
import numpy as np
import random
from datasets import CIFAR
from datasets.dataset_utils import get_cls_img_idxs_dict
from datasets.transforms import transform_train
from utils.asm_utils import detect_unlabel_imgs, get_select_fn
"""
sort each cls samples by criterion
"""
@torch.no_grad()
def sort_cls_samples(model, label_dataset, num_classes, criterion='lc'):
    """For each class, sort that class's sample indices by an active-learning
    uncertainty criterion ('rs', 'lc', 'ms' or 'en') computed from *model*'s
    predictions, and return {class_idx: sorted index array}."""
    # image indices grouped per class
    cls_img_idxs = get_cls_img_idxs_dict(label_dataset.targets, num_classes)
    y_pred_prob = detect_unlabel_imgs(model, label_dataset.data, num_classes, bs=100)  # [N,10] prob vector
    sort_cls_idxs_dict = {}
    assert criterion in ['rs', 'lc', 'ms', 'en'], 'no such criterion'
    select_fn = get_select_fn(criterion)
    for cls_idx, img_idxs in cls_img_idxs.items():
        img_idxs = np.array(img_idxs)
        cls_probs = y_pred_prob[img_idxs]  # [n,10]
        # sorted idxs in list (n_samples == len -> full sort of the class)
        _, sort_cls_idxs = select_fn(cls_probs, n_samples=len(cls_probs))  # sort total
        # recover to total label idx
        sort_cls_idxs_dict[cls_idx] = img_idxs[sort_cls_idxs]
    return sort_cls_idxs_dict
def check_sample_targets(cls_idxs_dict, targets):
    """Debug helper: print, per class, the target labels of its sampled indices."""
    for cls_id, idx_list in cls_idxs_dict.items():
        labels = []
        for i in idx_list:
            labels.append(targets[i])
        print('class:', cls_id, labels)
"""
build meta dataset by different sampling methods
"""
def build_meta_dataset(label_dataset, idx_to_meta):
    """Materialize a CIFAR meta-dataset from the chosen label-set indices."""
    # indices arrive grouped by class -- shuffle in place so batches mix classes
    random.shuffle(idx_to_meta)
    meta_data = np.take(label_dataset.data, idx_to_meta, axis=0)
    meta_targets = np.take(label_dataset.targets, idx_to_meta, axis=0)
    return CIFAR(data=meta_data, targets=meta_targets, transform=transform_train)
# random sample
def random_sample_meta_dataset(label_dataset, num_meta, num_classes):
    """Uniformly sample num_meta*num_classes images as the meta set."""
    candidates = list(range(len(label_dataset.targets)))
    random.shuffle(candidates)
    take = int(num_meta * num_classes)
    return build_meta_dataset(label_dataset, candidates[:take])
def random_sample_equal_cls(label_dataset, cls_img_idxs_dict, num_meta):
    """Randomly sample exactly num_meta images from every class."""
    chosen = []
    for _, img_idxs in cls_img_idxs_dict.items():
        chosen += random.sample(img_idxs, num_meta)
    return build_meta_dataset(label_dataset, chosen)
# random sample in a systematic way, loyal to original data distribution
# cover all hard-level samples
def random_system_sample_meta_dataset(label_dataset, sort_cls_idxs_dict, num_meta, mid=None):  # systematic (equal-interval) sampling
    """Systematic sampling loyal to the per-class hardness distribution:
    split each class's (hardness-sorted) index list into num_meta strata
    of size ``step`` and take the element at offset *mid* in each.

    mid=None -> a random offset is drawn per class.
    Fixes vs. the original: mid=0 is now honored (the old truthiness test
    treated it as unset), the random offset stays inside the stratum
    (randint's upper bound is inclusive), and classes with fewer than
    num_meta images no longer divide by zero.
    """
    idx_to_meta = []
    for cls, img_idxs in sort_cls_idxs_dict.items():  # handles classes of different sizes
        step = max(1, len(img_idxs) // num_meta)  # guard against tiny classes
        if mid is None:
            offset = random.randint(0, step - 1)  # randint is inclusive on both ends
        else:
            offset = mid % step
        # min() clamps the last picks for classes whose size is not a
        # multiple of step.
        idx_to_meta.extend([img_idxs[min(i * step + offset, len(img_idxs) - 1)]
                            for i in range(num_meta)])
    return build_meta_dataset(label_dataset, idx_to_meta)
# sample top hard samples on label_dataset
# 不带随机后,选出的样本固定了...
def sample_top_hard_meta_dataset(label_dataset, sort_cls_idxs_dict, num_meta):
    """Take the num_meta hardest samples of each class (per-class lists are
    sorted hardest-first)."""
    picked = []
    for _, idxs in sort_cls_idxs_dict.items():
        picked += list(idxs[:num_meta])
    return build_meta_dataset(label_dataset, picked)
# sample top easy samples on label_dataset
def sample_top_easy_meta_dataset(label_dataset, sort_cls_idxs_dict, num_meta):
    """Take the num_meta easiest samples of each class (per-class lists are
    sorted hardest-first, so the easiest sit at the tail)."""
    picked = []
    for _, idxs in sort_cls_idxs_dict.items():
        picked += list(idxs[-num_meta:])
    return build_meta_dataset(label_dataset, picked)
| [
"shuaixie@zju.edu.cn"
] | shuaixie@zju.edu.cn |
9c39907cb189fd01a905f1183f03c509c54c9867 | c89e4099f801cb4e71b732f74ba2237883de0b16 | /spider/concurrent/concur_threads_insts.py | 0dbe75f7f91c002521ecda1898366e1fa47d83e3 | [
"BSD-2-Clause"
] | permissive | JiyangZhang/PSpider | 3abc14792875e306d4a0207f1cd872834c35335c | 2151bbdd028acfa5794acab6c87988dc4bf485d3 | refs/heads/master | 2021-08-19T01:31:22.975247 | 2017-11-24T10:03:10 | 2017-11-24T10:38:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,792 | py | # _*_ coding: utf-8 _*_
"""
concur_threads_insts.py by xianhu
"""
import time
import logging
from .concur_abase import TPEnum, BaseThread
# ===============================================================================================================================
def work_fetch(self):
    """
    procedure of fetching, auto running, and return False if you need stop thread
    """
    # ----1---- take one url task from the fetch queue
    priority, url, keys, deep, repeat = self._pool.get_a_task(TPEnum.URL_FETCH)
    # ----2---- let the fetcher worker download it
    fetch_result, content = self._worker.working(priority, url, keys, deep, repeat)
    # ----3---- dispatch on the result code
    if fetch_result == 1:
        # success: hand the page content over to the parse queue
        self._pool.update_number_dict(TPEnum.URL_FETCH_SUCC, +1)
        self._pool.add_a_task(TPEnum.HTM_PARSE, (priority, url, keys, deep, content))
    elif fetch_result == 0:
        # soft failure: re-queue with lower priority and bumped repeat count
        self._pool.add_a_task(TPEnum.URL_FETCH, (priority+1, url, keys, deep, repeat+1))
    else:
        self._pool.update_number_dict(TPEnum.URL_FETCH_FAIL, +1)
    # ----4---- mark the queue task as done
    self._pool.finish_a_task(TPEnum.URL_FETCH)
    # ----5---- back-pressure: pause fetching while downstream queues are clogged
    while (self._pool.get_number_dict(TPEnum.HTM_NOT_PARSE) > 500) or (self._pool.get_number_dict(TPEnum.ITEM_NOT_SAVE) > 500):
        logging.debug("%s[%s] sleep 5 seconds because of too many 'HTM_NOT_PARSE' or 'ITEM_NOT_SAVE'...", self.__class__.__name__, self.getName())
        time.sleep(5)
    # fetch_result == -2 asks this thread to stop
    return False if fetch_result == -2 else True

FetchThread = type("FetchThread", (BaseThread,), dict(working=work_fetch))
# ===============================================================================================================================
def work_parse(self):
    """
    procedure of parsing, auto running, and only return True
    """
    # ----1---- take one fetched page from the parse queue
    priority, url, keys, deep, content = self._pool.get_a_task(TPEnum.HTM_PARSE)
    # ----2---- let the parser worker extract new urls and result items
    parse_result, url_list, save_list = self._worker.working(priority, url, keys, deep, content)
    # ----3---- on success, feed new urls back to fetching and items to saving
    if parse_result > 0:
        self._pool.update_number_dict(TPEnum.HTM_PARSE_SUCC, +1)
        for _url, _keys, _priority in url_list:
            # child urls start one level deeper with repeat count 0
            self._pool.add_a_task(TPEnum.URL_FETCH, (_priority, _url, _keys, deep+1, 0))
        for item in save_list:
            self._pool.add_a_task(TPEnum.ITEM_SAVE, (url, keys, item))
    else:
        self._pool.update_number_dict(TPEnum.HTM_PARSE_FAIL, +1)
    # ----4---- mark the queue task as done
    self._pool.finish_a_task(TPEnum.HTM_PARSE)
    return True

ParseThread = type("ParseThread", (BaseThread,), dict(working=work_parse))
# ===============================================================================================================================
def work_save(self):
    """
    procedure of saving, auto running, and only return True
    """
    # take one pending item from the save queue
    url, keys, item = self._pool.get_a_task(TPEnum.ITEM_SAVE)
    # hand it to the saver worker and bump the matching counter
    succeeded = self._worker.working(url, keys, item)
    counter = TPEnum.ITEM_SAVE_SUCC if succeeded else TPEnum.ITEM_SAVE_FAIL
    self._pool.update_number_dict(counter, +1)
    # mark the queue task as done
    self._pool.finish_a_task(TPEnum.ITEM_SAVE)
    return True

SaveThread = type("SaveThread", (BaseThread,), dict(working=work_save))
# ===============================================================================================================================
def init_monitor_thread(self, name, pool, sleep_time=5):
    """
    constructor of MonitorThread: the monitor has no worker, it only
    reports pool statistics every *sleep_time* seconds
    """
    BaseThread.__init__(self, name, None, pool)
    self._sleep_time = sleep_time        # sleeping time in every loop
    self._init_time = time.time()        # initial time of this spider
    self._last_fetch_num = 0             # fetch number in last time
    self._last_parse_num = 0             # parse number in last time
    self._last_save_num = 0              # save number in last time
    return
def work_monitor(self):
    """
    monitor the pool, auto running, and return False if you need stop thread
    """
    time.sleep(self._sleep_time)
    info = "%s status: running_tasks=%s;" % (self._pool.__class__.__name__, self._pool.get_number_dict(TPEnum.TASKS_RUNNING))
    # fetch statistics: totals plus throughput since the last report
    cur_not_fetch = self._pool.get_number_dict(TPEnum.URL_NOT_FETCH)
    cur_fetch_succ = self._pool.get_number_dict(TPEnum.URL_FETCH_SUCC)
    cur_fetch_fail = self._pool.get_number_dict(TPEnum.URL_FETCH_FAIL)
    cur_fetch_all = cur_fetch_succ + cur_fetch_fail
    info += " fetch:[NOT=%d, SUCC=%d, FAIL=%d, %d/(%ds)];" % (cur_not_fetch, cur_fetch_succ, cur_fetch_fail, cur_fetch_all-self._last_fetch_num, self._sleep_time)
    self._last_fetch_num = cur_fetch_all
    # parse statistics
    cur_not_parse = self._pool.get_number_dict(TPEnum.HTM_NOT_PARSE)
    cur_parse_succ = self._pool.get_number_dict(TPEnum.HTM_PARSE_SUCC)
    cur_parse_fail = self._pool.get_number_dict(TPEnum.HTM_PARSE_FAIL)
    cur_parse_all = cur_parse_succ + cur_parse_fail
    info += " parse:[NOT=%d, SUCC=%d, FAIL=%d, %d/(%ds)];" % (cur_not_parse, cur_parse_succ, cur_parse_fail, cur_parse_all-self._last_parse_num, self._sleep_time)
    self._last_parse_num = cur_parse_all
    # save statistics
    cur_not_save = self._pool.get_number_dict(TPEnum.ITEM_NOT_SAVE)
    cur_save_succ = self._pool.get_number_dict(TPEnum.ITEM_SAVE_SUCC)
    cur_save_fail = self._pool.get_number_dict(TPEnum.ITEM_SAVE_FAIL)
    cur_save_all = cur_save_succ + cur_save_fail
    info += " save:[NOT=%d, SUCC=%d, FAIL=%d, %d/(%ds)];" % (cur_not_save, cur_save_succ, cur_save_fail, cur_save_all-self._last_save_num, self._sleep_time)
    self._last_save_num = cur_save_all
    info += " total_seconds=%d" % (time.time() - self._init_time)
    logging.warning(info)
    # keep looping until the pool raises its stop flag
    return False if self._pool.get_monitor_stop_flag() else True

MonitorThread = type("MonitorThread", (BaseThread,), dict(__init__=init_monitor_thread, working=work_monitor))
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
f3ad2d30d023ac96ee324cece587c787ec28b6ad | 93652e0f73558ffa24059647324f79ba043ba241 | /topi/tests/python/test_topi_clip.py | 041565433bccd162ef55c48cb1e6cd6f106a8200 | [
"Apache-2.0"
] | permissive | souptc/tvm | 830b1444435b6bda267df305538a783eb687d473 | a8574e7bb814997cb3920a72035071899635b753 | refs/heads/master | 2020-03-25T12:42:20.686770 | 2018-08-06T21:07:38 | 2018-08-06T21:07:38 | 143,789,191 | 1 | 0 | Apache-2.0 | 2018-08-06T22:18:20 | 2018-08-06T22:18:19 | null | UTF-8 | Python | false | false | 1,458 | py | """Test code for clip operator"""
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def verify_clip(N, a_min, a_max, dtype):
    """Build, schedule and run topi.clip on an (N, N) tensor of *dtype*,
    checking the result against numpy.clip on each enabled device."""
    A = tvm.placeholder((N, N), dtype=dtype, name='A')
    B = topi.clip(A, a_min, a_max)
    s = tvm.create_schedule([B.op])
    # use memoize to pickle the test data for next time use
    @memoize("topi.tests.test_topi_clip")
    def get_ref_data():
        # inputs span twice the clip range so both bounds are exercised
        a_np = np.random.uniform(a_min*2, a_max*2, size=(N, N)).astype(dtype)
        b_np = np.clip(a_np, a_min, a_max)
        return a_np, b_np
    a_np, b_np = get_ref_data()
    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            # replace the generic schedule with the device-specific one
            s = topi.generic.schedule_injective(B)
        a = tvm.nd.array(a_np, ctx)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
        f = tvm.build(s, [A, B], device, name="clip")
        f(a, b)
        np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
    for device in ['llvm', 'opencl']:
        check_device(device)
def test_clip():
    """Exercise the clip operator for every supported dtype."""
    for dtype in ('float32', 'int16', 'int8'):
        verify_clip(1024, -127, 127, dtype)

if __name__ == "__main__":
    test_clip()
| [
"tqchen@users.noreply.github.com"
] | tqchen@users.noreply.github.com |
2d38dcb91332ff3a7c9d232d62866608fb719f06 | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/bwobsolete_helpers/PyGUI/PyGUIBase.py | 834a250a485148a54b8d4bd40344fe93be77ec21 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,952 | py | # 2017.08.29 21:44:03 Střední Evropa (letní čas)
# Embedded file name: scripts/client/bwobsolete_helpers/PyGUI/PyGUIBase.py
import BigWorld, GUI
import weakref
from bwdebug import *
from functools import partial
from Listener import Listenable
class PyGUIBase(object, Listenable):
    """Base script object for a BigWorld GUI component.

    Wraps a GUI component, manages its attachment to the GUI root (or to a
    parent component), forwards focus/activation state, and provides default
    no-op handlers for the full set of input / drag / IME events.  Subclasses
    override only the handlers they care about.

    NOTE(review): this file was recovered from decompiled bytecode; several
    decompiler artifacts have been repaired below (see BUG FIX comments).
    """

    def __init__(self, component=None):
        Listenable.__init__(self)
        self.component = component
        self.eventHandler = None
        # Weak proxy to the parent component (see the 'parent' property).
        self._parent = None
        self.isActive = False
        return

    def active(self, state):
        """Attach (state=True) or detach this component from the GUI tree.

        Attaching also grabs mouse-button/move/cross focus; detaching
        releases it.  Listeners are notified via activated(state).
        """
        if state == self.isActive:
            return
        if not self.component:
            return
        self.isActive = state
        if state:
            if not self._parent:
                GUI.addRoot(self.component)
            else:
                self._parent.addChild(self.component)
            self.component.mouseButtonFocus = True
            self.component.moveFocus = True
            self.component.crossFocus = True
        else:
            if not self._parent:
                GUI.delRoot(self.component)
            else:
                self._parent.delChild(self.component)
            self.component.mouseButtonFocus = False
            self.component.moveFocus = False
            self.component.crossFocus = False
        self.listeners.activated(state)

    def _setparent(self, parent):
        """Re-parent this component, migrating the GUI attachment if active."""
        if self.isActive:
            # Detach from the current owner before switching parents.
            if not self._parent:
                GUI.delRoot(self.component)
            else:
                self._parent.delChild(self.component)
        if parent:
            # Weak proxy avoids a parent<->child reference cycle.
            self._parent = weakref.proxy(parent)
        else:
            self._parent = parent
        if self.isActive:
            if not self._parent:
                GUI.addRoot(self.component)
            else:
                self._parent.addChild(self.component)

    def _getparent(self):
        return self._parent

    parent = property(_getparent, _setparent)

    def getWindow(self):
        """Return the enclosing Window script, or None when not in a window."""
        import Window
        if isinstance(self, Window.Window):
            return self
        if self.component.parent and self.component.parent.script:
            return self.component.parent.script.getWindow()
        return None

    def toggleActive(self):
        """Flip the active state."""
        self.active(not self.isActive)

    def setEventHandler(self, eh):
        self.eventHandler = eh

    def doLayout(self, parent):
        """Recursively lay out all child component scripts."""
        for name, child in self.component.children:
            child.script.doLayout(self)

    def setToolTipInfo(self, toolTipInfo):
        self.toolTipInfo = toolTipInfo

    def removeToolTipInfo(self):
        # BUG FIX: the original tested hasattr(self, toolTipInfo) with an
        # unquoted name, which raised NameError whenever this was called.
        if hasattr(self, 'toolTipInfo'):
            del self.toolTipInfo

    # --- default event handlers; subclasses override as needed ------------
    # Handlers return False to indicate the event was not consumed.

    def focus(self, state):
        pass

    def mouseButtonFocus(self, state):
        pass

    def handleInputLangChangeEvent(self):
        return False

    def handleKeyEvent(self, event):
        return False

    def handleMouseEvent(self, comp, event):
        return False

    def handleMouseButtonEvent(self, comp, event):
        """Notify the enclosing window (if any) that it was clicked."""
        window = self.getWindow()
        if window:
            window.listeners.windowClicked()
        return False

    def handleMouseClickEvent(self, component):
        return False

    def handleMouseEnterEvent(self, comp):
        """Show a tool tip on hover when one has been configured."""
        if getattr(self, 'toolTipInfo', None):
            import ToolTip
            ToolTip.ToolTipManager.instance.setupToolTip(self.component, self.toolTipInfo)
        return False

    def handleMouseLeaveEvent(self, comp):
        return False

    def handleAxisEvent(self, event):
        return False

    def handleDragStartEvent(self, comp):
        return False

    def handleDragStopEvent(self, comp):
        return False

    def handleDragEnterEvent(self, comp, dragged):
        return False

    def handleDragLeaveEvent(self, comp, dragged):
        return False

    def handleDropEvent(self, comp, dropped):
        return False

    def handleIMEEvent(self, event):
        return False

    # --- (de)serialisation -------------------------------------------------

    def onLoad(self, dataSection):
        """Restore optional tool-tip configuration from a data section."""
        if dataSection.has_key('toolTipInfo'):
            import ToolTip
            self.toolTipInfo = ToolTip.ToolTipInfo()
            self.toolTipInfo.onLoad(dataSection._toolTipInfo)

    def onSave(self, dataSection):
        """Persist tool-tip configuration, when present."""
        if hasattr(self, 'toolTipInfo') and self.toolTipInfo is not None:
            toolTipInfoSection = dataSection.createSection('toolTipInfo')
            self.toolTipInfo.onSave(toolTipInfoSection)

    def onBound(self):
        """Called once the component hierarchy is bound.

        Ensures every child has a PyGUIBase script, then wires up
        declaratively registered event handlers.
        """
        for name, child in self.component.children:
            if not child.script:
                child.script = PyGUIBase(child)
            # BUG FIX: the decompiled source read
            # 'raise isinstance(child.script, PyGUIBase) or AssertionError',
            # which raised on *every* child; restore the original assert.
            assert isinstance(child.script, PyGUIBase)
        self._bindEvents(self.__class__)

    def _bindEvents(self, cls):
        """Bind methods tagged with _PyGUIEventHandler metadata to the named
        sub-component's event slot, recursing up the class hierarchy."""
        for name, function in cls.__dict__.iteritems():
            if hasattr(function, '_PyGUIEventHandler'):
                for componentName, eventName, args, kargs in function._PyGUIEventHandler:
                    assert callable(function)
                    # Resolve dotted component paths, e.g. 'panel.button'.
                    component = self.component
                    for attr in componentName.split('.'):
                        component = getattr(component, attr, None)
                        if component is None:
                            break
                    # BUG FIX: the decompiled control flow
                    # ('component is None and ERROR_MSG(...)' followed by an
                    # unconditional continue) made the setattr below
                    # unreachable, so no handler was ever bound.
                    if component is None:
                        ERROR_MSG("PyGUIEvent: '%s' has no component named '%s'." % (str(self), componentName))
                        continue
                    function = getattr(self, function.__name__)
                    setattr(component.script, eventName, partial(function, *args, **kargs))
        for base in cls.__bases__:
            self._bindEvents(base)
        return
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\bwobsolete_helpers\PyGUI\PyGUIBase.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:44:03 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
b3a177cdd830bf2a2b57fc5cb16d754555abd759 | 9f7a9f268abfc168e408e36b513132402fdd353c | /micro_detect/out1.2.py | 0f6038c2781d0daf80f281d60b7d92ce7d906c17 | [] | no_license | 863752027z/lab_server | fe602bf0a588989b0a7ae171454eba67fa6907ca | 65eeaf94712afd96363d449376a291918156354f | refs/heads/master | 2020-08-10T12:40:30.803168 | 2019-10-11T04:51:14 | 2019-10-11T04:51:14 | 214,344,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,556 | py | import cv2
import os
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import torch
import torchvision
import torch.nn as nn
import torch.utils.data as Data
from torchvision import transforms, datasets
from collections import OrderedDict
# Pin this process to physical GPU 6; after masking it is visible as cuda:0.
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
device = torch.device('cuda:0')
class Encoder(nn.Module):
    """Eight-layer strided-conv encoder: (N, 3, 256, 256) -> (N, 256, 1, 1).

    Every stage halves the spatial resolution with a 4x4 / stride-2 conv;
    stages 1-7 are followed by BatchNorm + LeakyReLU(0.2), the final conv
    is left bare.  Layer names match the original hand-written OrderedDict
    so state_dicts stay compatible.
    """

    def __init__(self):
        super(Encoder, self).__init__()
        widths = [3, 32, 64, 128, 256, 256, 256, 256, 256]
        stages = OrderedDict()
        for idx in range(1, 8):
            stages['Con%d' % idx] = nn.Conv2d(widths[idx - 1], widths[idx], 4,
                                              stride=2, padding=1)
            stages['BatchNorm%d' % idx] = nn.BatchNorm2d(widths[idx])
            stages['LeakyReLU%d' % idx] = nn.LeakyReLU(0.2, True)
        # Last stage: conv only, no normalisation or activation.
        stages['Con8'] = nn.Conv2d(widths[7], widths[8], 4, stride=2, padding=1)
        self.Encoder = nn.Sequential(stages)

    def forward(self, x):
        """Map a batch of 256x256 RGB images to 256-dim spatial-1x1 codes."""
        return self.Encoder(x)
class Decoder(nn.Module):
    """Eight-stage upsampling decoder: (N, 512, 1, 1) -> (N, 3, 256, 256).

    Each stage doubles the spatial size (bilinear upsample) followed by a
    3x3 conv; stages 1-7 add BatchNorm + ReLU, the final stage maps to
    3 channels and squashes to [-1, 1] with Tanh.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        # BUG FIX: the 6th stage was labelled 'Con5'/'BatchNorm5', duplicating
        # the 5th stage's keys.  OrderedDict keeps a single entry per key, so
        # the duplicate silently *dropped* an entire conv+BN pair and left two
        # consecutive upsamples with no conv between them.  Renamed to
        # 'Con6'/'BatchNorm6' so all 8 stages are actually built.
        decoder_layer = OrderedDict([
            ('Upsample1', nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)),
            ('Con1', nn.Conv2d(512, 32, 3, stride=1, padding=1)),
            ('BatchNorm1', nn.BatchNorm2d(32)),
            ('ReLU1', nn.ReLU()),
            ('Upsample2', nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)),
            ('Con2', nn.Conv2d(32, 64, 3, stride=1, padding=1)),
            ('BatchNorm2', nn.BatchNorm2d(64)),
            ('ReLU2', nn.ReLU()),
            ('Upsample3', nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)),
            ('Con3', nn.Conv2d(64, 128, 3, stride=1, padding=1)),
            ('BatchNorm3', nn.BatchNorm2d(128)),
            ('ReLU3', nn.ReLU()),
            ('Upsample4', nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)),
            ('Con4', nn.Conv2d(128, 256, 3, stride=1, padding=1)),
            ('BatchNorm4', nn.BatchNorm2d(256)),
            ('ReLU4', nn.ReLU()),
            ('Upsample5', nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)),
            ('Con5', nn.Conv2d(256, 256, 3, stride=1, padding=1)),
            ('BatchNorm5', nn.BatchNorm2d(256)),
            ('ReLU5', nn.ReLU()),
            ('Upsample6', nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)),
            ('Con6', nn.Conv2d(256, 256, 3, stride=1, padding=1)),
            ('BatchNorm6', nn.BatchNorm2d(256)),
            ('ReLU6', nn.ReLU()),
            ('Upsample7', nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)),
            ('Con7', nn.Conv2d(256, 256, 3, stride=1, padding=1)),
            ('BatchNorm7', nn.BatchNorm2d(256)),
            ('ReLU7', nn.ReLU()),
            ('Upsample8', nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)),
            ('Con8', nn.Conv2d(256, 3, 3, stride=1, padding=1)),
            ('Tanh', nn.Tanh())
        ])
        self.Decoder = nn.Sequential(decoder_layer)

    def forward(self, x):
        """Decode a (N, 512, 1, 1) code into a (N, 3, 256, 256) image."""
        return self.Decoder(x)
class LstmCell(nn.Module):
    """Thin wrapper around a single 256 -> 256 nn.LSTMCell step."""

    def __init__(self):
        super(LstmCell, self).__init__()
        self.LstmCell = nn.LSTMCell(input_size=256, hidden_size=256)

    def forward(self, xt, h, c):
        """Advance the cell one step on input *xt*; returns the new (h, c)."""
        return self.LstmCell(xt, (h, c))
def printGPU():
    """Print the index and name of every visible CUDA device.

    A no-op when CUDA is unavailable (device_count() is then 0).
    """
    for i in range(torch.cuda.device_count()):
        # BUG FIX: the original printed get_device_name(0) for every index,
        # so multi-GPU hosts reported the first device's name repeatedly.
        print(i, torch.cuda.get_device_name(i))
def draw(loss_list):
    """Plot the loss curve (loss vs. sample index) and display it."""
    xs = range(len(loss_list))
    plt.subplot(2, 1, 1)
    plt.plot(xs, loss_list, 'r-')
    plt.xlabel('batch_num')
    plt.ylabel('loss')
    plt.show()
def save_data_to_excel(data, path):
    """Write a 2-D array to sheet 'page_1' of the .xlsx file at *path*."""
    print(datetime.datetime.now())
    print('generating:', path)
    print(data.shape)
    frame = pd.DataFrame(data)
    writer = pd.ExcelWriter(path)
    # float_format keeps five decimal places in the saved cells.
    frame.to_excel(writer, 'page_1', float_format='%.5f')
    writer.save()
    print('done')
def read_data_from_excel(path):
    """Load sheet 'page_1' of *path* as a numpy array.

    The first column (the row index pandas saved alongside the data)
    is dropped before returning.
    """
    frame = pd.read_excel(path, 'page_1')
    raw = np.array(frame)
    return np.delete(raw, 0, axis=1)
def get_path(base_path):
    """Return '<base_path>/<name>' for each immediate subdirectory.

    Only the top level of *base_path* is inspected; a missing or empty
    directory yields an empty list.
    """
    walker = os.walk(base_path)
    try:
        _, subdirs, _ = next(walker)
    except StopIteration:
        # os.walk produced nothing (path missing / unreadable).
        return []
    return [base_path + '/' + name for name in subdirs]
def trainLoader(file_path, batch_size, shuffle, num_workers):
    """Build a DataLoader over the ImageFolder rooted at *file_path*.

    Images are converted to tensors and normalised per channel with
    mean 0.5 / std 0.5 (i.e. roughly into [-1, 1]).
    """
    normalize = transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                     std=(0.5, 0.5, 0.5))
    pipeline = transforms.Compose([transforms.ToTensor(), normalize])
    folder = datasets.ImageFolder(file_path, transform=pipeline)
    return Data.DataLoader(dataset=folder,
                           batch_size=batch_size,
                           shuffle=shuffle,
                           num_workers=num_workers)
def testLoader(file_path, batch_size, shuffle, num_workers):
    """Build an evaluation DataLoader over the ImageFolder at *file_path*.

    Same normalisation as trainLoader (per-channel mean 0.5 / std 0.5).
    """
    pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])
    folder = datasets.ImageFolder(file_path, transform=pipeline)
    loader = Data.DataLoader(dataset=folder,
                             batch_size=batch_size,
                             shuffle=shuffle,
                             num_workers=num_workers)
    return loader
def train(loader_list, learning_rate, num_epochs, seq):
    """Jointly train the encoder, LSTM cell and decoder.

    For every batch of *seq* consecutive frames, frames 1..seq-1 are encoded
    and passed through one LSTMCell step; the last hidden row is concatenated
    with frame 0's encoding and decoded back to an image, which is regressed
    (MSE) against frame 0 itself.

    NOTE(review): the seq-1 source frames are fed to the LSTMCell as a
    *batch* for a single step (not as a time sequence), and only the last
    batch row's hidden state is used — confirm this is intended.

    Returns (loss_list, encoder_model, cell_model); loss_list holds one
    loss sample per batch on every second epoch.
    """
    cell_model = LstmCell().to(device)
    encoder_model = Encoder().to(device)
    decoder_model = Decoder().to(device)
    criterion = nn.MSELoss().to(device)
    optimizer = torch.optim.SGD([
        {'params': encoder_model.parameters()},
        {'params': cell_model.parameters()},
        {'params': decoder_model.parameters()}
    ], lr=learning_rate, momentum=0.9)
    loss_list = []
    for epoch in range(num_epochs):
        for i in range(len(loader_list)):
            train_loader = loader_list[i]
            for idx, (data, label) in enumerate(train_loader):
                # A full window is required; a short trailing batch ends this loader.
                if data.shape[0] < seq:
                    break
                h = torch.zeros(seq - 1, 256).to(device)
                c = torch.zeros(seq - 1, 256).to(device)
                data = data.to(device)  # (seq, 3, 256, 256)
                # =========forward===========
                encoder_output = encoder_model(data)
                encoder_output = encoder_output.view((encoder_output.shape[0], encoder_output.shape[1]))  # (seq, 256)
                temp_target = encoder_output[0].view(1, encoder_output.shape[1], 1, 1)  # (1, 256, 1, 1)
                # BUG FIX: the source view was hard-coded to 3 rows, which only
                # worked for seq == 4; use seq - 1 so any window length works.
                temp_source = encoder_output[1:].view(seq - 1, encoder_output.shape[1])  # (seq-1, 256)
                h, c = cell_model(temp_source, h, c)
                cell_output = h[-1].view(1, 256, 1, 1)
                decoder_input = torch.cat((temp_target, cell_output), 1)  # (1, 512, 1, 1)
                decoder_output = decoder_model(decoder_input)  # (1, 3, 256, 256)
                target_img = data[0:1, :, :, :]  # (1, 3, 256, 256)
                loss = criterion(decoder_output, target_img)
                # =========backward=========
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # ============log===========
                print('epoch [{}/{}], batch [{}], loader [{}] loss:{:.4f}'
                      .format(epoch + 1, num_epochs, idx, i, loss.item()))
                if epoch % 2 == 0:
                    # Sample the loss curve on every second epoch only.
                    loss_list.append(loss.item())
    return loss_list, encoder_model, cell_model
def test(encoder_moudle, cell_moudle, loader, seq):
    """Extract one 512-dim feature per full *seq*-frame window of *loader*.

    Slides a window of *seq* consecutive frames over the loader (which is
    assumed to yield one frame per batch — batch_size must be 1).  For each
    complete window, the first frame's encoding (256 dims) is concatenated
    with the LSTM summary of the remaining seq-1 frames (256 dims).

    Returns a numpy array of shape (num_windows, 512).  Raises if the loader
    yields fewer than *seq* frames (``feature`` would be unbound).
    """
    with torch.no_grad():
        for idx, (data, label) in enumerate(loader):
            data = data.to(device)
            # Phase 1: accumulate the first seq-1 frames into the window.
            if idx <= seq - 2:
                if idx == 0:
                    Quad = data
                else:
                    Quad = torch.cat((Quad, data), 0)
            # Phase 2: the window is complete for the first time; compute
            # the first feature row.
            if idx == seq - 1:
                Quad = torch.cat((Quad, data), 0)
                # Fresh zero LSTM state for every window.
                h = torch.zeros(seq - 1, 256).to(device)
                c = torch.zeros(seq - 1, 256).to(device)
                encoder_out = encoder_moudle(Quad)
                # Frame 0 encoding is the "target" half of the feature.
                temp_target = encoder_out[0:1, :, :, :]
                temp_source = encoder_out[1:, :, :, :].view(seq - 1, 256)
                h, c = cell_moudle(temp_source, h, c)
                # Only the last row of the hidden state is kept as the summary.
                cell_out = h[-1].view(1, 256, 1, 1)
                feature = torch.cat((temp_target, cell_out), 1).to(device)
            # Phase 3: slide the window one frame forward and append a row.
            if idx >= seq:
                h = torch.zeros(seq - 1, 256).to(device)
                c = torch.zeros(seq - 1, 256).to(device)
                # Drop the oldest frame, append the newest.
                Quad = Quad[1:, :, :, :]
                Quad = torch.cat((Quad, data), 0)
                encoder_out = encoder_moudle(Quad)
                temp_target = encoder_out[0:1, :, :, :]
                temp_source = encoder_out[1:, :, :, :].view(seq - 1, 256)
                h, c = cell_moudle(temp_source, h, c)
                cell_out = h[-1].view(1, 256, 1, 1)
                curr_feature = torch.cat((temp_target, cell_out), 1)
                feature = torch.cat((feature, curr_feature), 0)
        # Collapse the trailing 1x1 spatial dims and move to host memory.
        feature = feature.cpu().detach().view(feature.shape[0], feature.shape[1]).numpy()
        return feature
def gen_train_feature(encoder_moudle, cell_moudle, path_list, save_path, seq):
    """Extract features for every directory in *path_list* and save each
    result to '<save_path>/<dir_name>.xlsx'."""
    for i in range(len(path_list)):
        # BUG FIX: the output name was taken with a hard-coded slice
        # path_list[i][29:], which silently produces garbage for any base
        # path whose length differs from '/home/zlw/dataset/SAMM/train/'.
        # Use the directory's basename instead.
        curr_path = save_path + '/' + os.path.basename(path_list[i]) + '.xlsx'
        temp_loader = testLoader(path_list[i], batch_size=1, shuffle=False, num_workers=8)
        feature = test(encoder_moudle, cell_moudle, temp_loader, seq)
        print('generating ' + curr_path)
        save_data_to_excel(feature, curr_path)
# ---- script entry: train on all SAMM sequences, then persist the models ----
printGPU()
base_path = '/home/zlw/dataset/SAMM/train'
encoder_moudle_path = '/home/zlw/dataset/SAMM/moudle/encoder_moudle_40.pkl'
cell_moudle_path = '/home/zlw/dataset/SAMM/moudle/cell_moudle_40.pkl'
save_path = '/home/zlw/dataset/SAMM/train_feature'
# Hyper-parameters; batch_size doubles as the sequence (window) length seq.
learning_rate = 1e-4
batch_size = 4
num_workers = 8
num_epochs = 40
# One DataLoader per sequence directory, in deterministic (unshuffled) order.
path_list = get_path(base_path)
loader_list = []
for i in range(len(path_list)):
    temp_loader = trainLoader(path_list[i], batch_size, False, num_workers)
    loader_list.append(temp_loader)
loss_list, encoder_moudle, cell_moudle = train(loader_list, learning_rate, num_epochs, batch_size)
# Save whole model objects (not just state_dicts), matching the loading code.
torch.save(encoder_moudle, encoder_moudle_path)
torch.save(cell_moudle, cell_moudle_path)
print(str(datetime.datetime.now()) + ' moudle save successfully\n')
draw(loss_list)
| [
"863752027@qq.com"
] | 863752027@qq.com |
24e24c1bb50cbbd0c3f4af14a06c6dcf353f6fe4 | 425db5a849281d333e68c26a26678e7c8ce11b66 | /LeetCodeSolutions/LeetCode_0252.py | ccf1daf59fe8c44bc1f9575209b20c8851cafb90 | [
"MIT"
] | permissive | lih627/python-algorithm-templates | e8092b327a02506086414df41bbfb2af5d6b06dc | a61fd583e33a769b44ab758990625d3381793768 | refs/heads/master | 2021-07-23T17:10:43.814639 | 2021-01-21T17:14:55 | 2021-01-21T17:14:55 | 238,456,498 | 29 | 8 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | from typing import List
class Solution:
def canAttendMeetings(self, intervals: List[List[int]]) -> bool:
if not intervals:
return True
intervals.sort()
end = -1
for interval in intervals:
if interval[0] < end:
return False
end = max(end, interval[1])
return True
| [
"lih627@outlook.com"
] | lih627@outlook.com |
213840862cac4a5e0577be766248cd201e560514 | be6b4181de09a50ccbd7caea58dbdbcbf90602be | /numba/servicelib/threadlocal.py | 2ad13112109b26cdbb93c40202dffb8edc1a6bf4 | [
"BSD-2-Clause"
] | permissive | pombreda/numba | 6490c73fcc0ec5d93afac298da2f1068c0b5ce73 | 25326b024881f45650d45bea54fb39a7dad65a7b | refs/heads/master | 2021-01-15T10:37:08.119031 | 2014-11-06T22:32:48 | 2014-11-06T22:32:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | """
Implements:
- Threadlocal stack
"""
from __future__ import print_function, absolute_import, division
import threading
class TLStack(object):
    """A stack whose contents are local to the calling thread.

    Each thread that touches the stack lazily gets its own independent
    backing list, so pushes/pops in one thread never affect another.
    """

    def __init__(self):
        self.local = threading.local()

    @property
    def stack(self):
        """Return this thread's backing list, creating it on first use."""
        try:
            # Retrieve thread local stack
            return self.local.stack
        except AttributeError:
            # BUG FIX: the original initialised self.local.stack here but
            # fell off the end of the getter, returning None — every first
            # access in a thread then crashed (e.g. None.append).  Return
            # the freshly created list instead.
            self.local.stack = []
            return self.local.stack

    def push(self, item):
        """Push *item* onto this thread's stack."""
        self.stack.append(item)

    def pop(self):
        """Pop and return the top item (IndexError when empty)."""
        return self.stack.pop()

    @property
    def top(self):
        """The current top item (IndexError when empty)."""
        return self.stack[-1]

    @property
    def is_empty(self):
        """True when this thread's stack holds no items."""
        return not self.stack

    def __bool__(self):
        return not self.is_empty

    def __nonzero__(self):
        # Python 2 alias for __bool__.
        return self.__bool__()

    def __len__(self):
        return len(self.stack)

    def clear(self):
        """Drop all per-thread stacks by resetting the threading.local."""
        self.__init__()
| [
"michael.lam.sk@gmail.com"
] | michael.lam.sk@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.