text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Django settings for betleague project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os

# Third-party helper that turns Heroku's DATABASE_URL env var into a Django
# database configuration dict (moved up from the bottom of the file).
import dj_database_url

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded; prefer reading it from the
# environment, e.g. os.environ.get('DJANGO_SECRET_KEY', ...).
SECRET_KEY = '01_h%-grmbhldp9588#689=g#55$4zz)$33rhu=r1i%p_dog0#'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['localhost', 'betleague-staging.herokuapp.com', 'betleague.herokuapp.com']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'competitions',
    'matches',
    'accounts',
    'functional_tests',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # BUGFIX: WhiteNoise was last in this list; its documentation requires it
    # to sit directly below SecurityMiddleware so static requests are served
    # before the rest of the middleware stack runs.
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'betleague.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'betleague.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Default is local sqlite; overridden below when DATABASE_URL is set.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Kiev'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

LOGIN_REDIRECT_URL = '/'

# On Heroku, dj_database_url.config() returns the DATABASE_URL configuration;
# locally it returns {} and the sqlite default above is kept.
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
| {
"content_hash": "1b461d2d6be4f44f2f4f466ebb9f60ae",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 91,
"avg_line_length": 28.055118110236222,
"alnum_prop": 0.696042660679203,
"repo_name": "asyler/betleague",
"id": "c904c0c9f084f2d1749c82f9b9d67aa9e056e33c",
"size": "3563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "betleague/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2543"
},
{
"name": "HTML",
"bytes": "14185"
},
{
"name": "JavaScript",
"bytes": "879"
},
{
"name": "Python",
"bytes": "74891"
}
],
"symlink_target": ""
} |
def get_students_with_grade(students, grade):
    """Return every [name, grade] record whose grade equals *grade*."""
    return [record for record in students if record[1] == grade]
def get_second_lowest_grade(students):
    """Return the second-lowest distinct grade among [name, grade] records."""
    distinct_grades = {record[1] for record in students}
    return sorted(distinct_grades)[1]  # second element of the ascending order
def get_grade(student):
    """Return the grade field (index 1) of a [name, grade] record."""
    grade = student[1]
    return grade
if __name__ == '__main__':
    # Read N student records from stdin: first line is N, then alternating
    # name / score lines (HackerRank "Nested Lists" input format).
    students = []
    for _ in range(int(input())):
        name = input()
        score = float(input())
        students.append([name, score])
    # Print, alphabetically, every student holding the second-lowest grade.
    second_lowest_grade = get_second_lowest_grade(students)
    students_with_grade = get_students_with_grade(
        students, second_lowest_grade)
    sorted_students = sorted(students_with_grade)
    for name, _score in sorted_students:
        print(name)
| {
"content_hash": "0cb122dee81f8917941490da80b87877",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 70,
"avg_line_length": 29.555555555555557,
"alnum_prop": 0.6578947368421053,
"repo_name": "rootulp/hackerrank",
"id": "872e71cb33d03ed20d6dbafbcb96938407c6ec7c",
"size": "798",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/nested-list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "695"
},
{
"name": "HTML",
"bytes": "3180"
},
{
"name": "Java",
"bytes": "55554"
},
{
"name": "JavaScript",
"bytes": "18863"
},
{
"name": "Python",
"bytes": "116652"
},
{
"name": "Ruby",
"bytes": "44389"
},
{
"name": "Shell",
"bytes": "1226"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from payments.utils import send_unpaid_order_email_notifications
class Command(BaseCommand):
    """Management command that emails notifications for unpaid orders."""

    help = 'Send notifications for unpaid order by emails'

    def handle(self, *args, **options):
        # Delegate the real work to payments.utils; verbosity is forwarded so
        # the helper can decide how much to report.
        send_unpaid_order_email_notifications(verbose=options['verbosity'])
        # Only print the success message when output was requested.
        if options['verbosity'] > 0:
            self.stdout.write(self.style.SUCCESS('Successfully sent unpaid order notifications.'))
| {
"content_hash": "b8442c291f0d8bfd26447b99e8495094",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 98,
"avg_line_length": 35.46153846153846,
"alnum_prop": 0.7310195227765727,
"repo_name": "Matusf/django-konfera",
"id": "7f02aa73e5bb2d572d9d81ebd80dee28f79b7805",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "payments/management/commands/send_unpaid_notifications.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11931"
},
{
"name": "HTML",
"bytes": "19983"
},
{
"name": "Python",
"bytes": "148078"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from zorro.di import di, has_dependencies, dependency
from tilenol.event import Event
class LayoutMeta(type):
    """Metaclass that records the class body's attribute names, in the order
    they were defined, on a ``fields`` attribute of every created class."""

    @classmethod
    def __prepare__(cls, name, bases):
        # Use an ordered namespace so definition order is observable below.
        return OrderedDict()

    def __init__(cls, name, bases, namespace):
        cls.fields = [attr for attr in namespace]
@has_dependencies
class Layout(metaclass=LayoutMeta):
    """Base class for tilenol layouts: tracks visibility, re-layout events,
    and iteration over the windows the layout currently shows."""

    def __init__(self):
        self.visible = False
        self.relayout = Event('layout.relayout')
        self.relayout.listen(self.check_relayout)

    def check_relayout(self):
        # Only re-run the layout while it is actually on screen.
        if not self.visible:
            return
        self.layout()
        self.group.check_focus()

    @classmethod
    def get_defined_classes(cls, base):
        """Return an ordered mapping of class-attribute name -> attribute,
        restricted to attributes that are subclasses of *base*."""
        candidates = ((name, getattr(cls, name)) for name in cls.fields)
        return OrderedDict(
            (name, value) for name, value in candidates
            if isinstance(value, type) and issubclass(value, base))

    def dirty(self):
        # Ask listeners (including ourselves) to redo the layout.
        self.relayout.emit()

    def all_visible_windows(self):
        """Iterate over this layout's visible windows, then those of any
        sublayouts (``sublayouts`` is a callable returning child layouts)."""
        for window in getattr(self, 'visible_windows', ()):
            yield window
        sublayouts = getattr(self, 'sublayouts', None)
        if sublayouts:
            for child in sublayouts():
                for window in child.visible_windows:
                    yield window

    def hide(self):
        self.visible = False
        for window in self.all_visible_windows():
            window.hide()

    def show(self):
        self.visible = True
        for window in self.all_visible_windows():
            window.show()
| {
"content_hash": "3312966af97795bbd1d256f96300c5be",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 59,
"avg_line_length": 24.233333333333334,
"alnum_prop": 0.561898211829436,
"repo_name": "tailhook/tilenol",
"id": "f39b78feb970f0e9ff6811c050bf186c9ff5b595",
"size": "1454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tilenol/layout/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "228140"
},
{
"name": "Shell",
"bytes": "285"
}
],
"symlink_target": ""
} |
from morphforge.traces.tracetypes import TraceFixedDT
from morphforge.traces.tracetypes import TraceVariableDT
from morphforge.traces.tracetypes import TracePointBased
from morphforge.traces.tracetypes import TracePiecewise
from morphforge.traces.tracetypes import TracePieceFunctionLinear
from morphforge.traces.tracetypes import TracePieceFunctionFlat
from morphforge.traces.tracetypes import TracePieceFunctionLinear
# Need so that they register the methods:
import morphforge.traces.methods
import morphforge.traces.operators
from morphforge.traces.traceobjpluginctrl import TraceOperatorCtrl
from morphforge.traces.traceobjpluginctrl import TraceMethodCtrl
from morphforge.traces.tags import TagSelector
# Public names re-exported by ``morphforge.traces``.
# BUGFIX: 'TracePieceFunctionLinear' was listed twice.
__all__ = [
    'TraceFixedDT',
    'TraceVariableDT',
    'TracePointBased',
    'TracePiecewise',
    'TracePieceFunctionLinear',
    'TracePieceFunctionFlat',
    'TagSelector',
    'TraceOperatorCtrl',
    'TraceMethodCtrl',
]
| {
"content_hash": "7e9605edb6e1696db4113e566512a49b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 32.7,
"alnum_prop": 0.81855249745158,
"repo_name": "mikehulluk/morphforge",
"id": "7dae8020575b14f619319a7ba4c98f55eed31cb9",
"size": "2520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morphforge/traces/__init__.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "AMPL",
"bytes": "4818"
},
{
"name": "C",
"bytes": "1499"
},
{
"name": "Makefile",
"bytes": "4436"
},
{
"name": "Python",
"bytes": "1557833"
},
{
"name": "Shell",
"bytes": "14"
},
{
"name": "XSLT",
"bytes": "94266"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from os import listdir
from os.path import abspath, dirname, isdir, isfile, join, realpath, relpath, splitext
import re
from subprocess import Popen, PIPE
import sys
# Runs the tests. (NOTE: this script is Python 2 — it uses print statements.)
WREN_DIR = dirname(dirname(realpath(__file__)))
TEST_DIR = join(WREN_DIR, 'test')
WREN_APP = join(WREN_DIR, 'wrend')
# Markers recognised inside .wren test files to declare expectations.
EXPECT_PATTERN = re.compile(r'// expect: (.*)')
EXPECT_ERROR_PATTERN = re.compile(r'// expect error')
EXPECT_ERROR_LINE_PATTERN = re.compile(r'// expect error line (\d+)')
EXPECT_RUNTIME_ERROR_PATTERN = re.compile(r'// expect runtime error: (.+)')
# Patterns matching the interpreter's stderr output.
ERROR_PATTERN = re.compile(r'\[.* line (\d+)\] Error')
STACK_TRACE_PATTERN = re.compile(r'\[.* line (\d+)\] in')
SKIP_PATTERN = re.compile(r'// skip: (.*)')
NONTEST_PATTERN = re.compile(r'// nontest')
# ANSI colour escapes; disabled on Windows consoles which ignore them.
if sys.platform == 'win32':
    class color:
        GREEN = ''
        RED = ''
        DEFAULT = ''
        PINK = ''
        YELLOW = ''
else:
    class color:
        GREEN = '\033[32m'
        RED = '\033[31m'
        DEFAULT = '\033[0m'
        PINK = '\033[91m'
        YELLOW = '\033[33m'
# Global counters updated by run_test().
passed = 0
failed = 0
skipped = defaultdict(int)
num_skipped = 0;
def walk(dir, callback):
    """ Walks [dir], and executes [callback] on each file.

    dir -- directory to traverse (made absolute before recursing)
    callback -- callable invoked with the full path of every regular file
    """
    dir = abspath(dir)
    # BUGFIX: the original filtered out '.' and '..', but os.listdir never
    # returns them, so the check was dead code. Also avoid shadowing the
    # ``file`` builtin.
    for entry in listdir(dir):
        full_path = join(dir, entry)
        if isdir(full_path):
            walk(full_path, callback)
        else:
            callback(full_path)
def print_line(line=None):
    """Overwrite the current terminal line with *line* (Python 2 prints).

    Used to keep a single live status line while tests run.
    """
    # Erase the line.
    print '\033[2K',
    # Move the cursor to the beginning.
    print '\r',
    if line:
        print line,
    # Flush so the partial (comma-suppressed) prints appear immediately.
    sys.stdout.flush()
def run_test(path):
    """Run a single .wren test file and record pass/fail/skip in the globals.

    Parses '// expect ...' markers out of the file, runs the interpreter on
    it, then compares stdout/stderr/exit code against those expectations.
    """
    global passed
    global failed
    global skipped
    global num_skipped
    if (splitext(path)[1] != '.wren'):
        return
    # Check if we are just running a subset of the tests.
    if len(sys.argv) == 2:
        this_test = relpath(path, join(WREN_DIR, 'test'))
        if not this_test.startswith(sys.argv[1]):
            return
    # Make a nice short path relative to the working directory.
    path = relpath(path)
    # Read the test and parse out the expectations.
    expect_output = []            # list of (expected line, source line number)
    expect_error = []             # source line numbers of expected compile errors
    expect_runtime_error_line = 0
    expect_runtime_error = None
    expect_return = 0             # expected interpreter exit code
    print_line('Passed: ' + color.GREEN + str(passed) + color.DEFAULT +
               ' Failed: ' + color.RED + str(failed) + color.DEFAULT +
               ' Skipped: ' + color.YELLOW + str(num_skipped) + color.DEFAULT)
    line_num = 1
    with open(path, 'r') as file:
        for line in file:
            match = EXPECT_PATTERN.search(line)
            if match:
                expect_output.append((match.group(1), line_num))
            match = EXPECT_ERROR_PATTERN.search(line)
            if match:
                expect_error.append(line_num)
                # If we expect compile errors, it should exit with EX_DATAERR.
                expect_return = 65
            match = EXPECT_ERROR_LINE_PATTERN.search(line)
            if match:
                expect_error.append(int(match.group(1)))
                # If we expect compile errors, it should exit with EX_DATAERR.
                expect_return = 65
            match = EXPECT_RUNTIME_ERROR_PATTERN.search(line)
            if match:
                expect_runtime_error_line = line_num
                expect_runtime_error = match.group(1)
                # If we expect a runtime error, it should exit with EX_SOFTWARE.
                expect_return = 70
            match = SKIP_PATTERN.search(line)
            if match:
                num_skipped += 1
                skipped[match.group(1)] += 1
                return
            match = NONTEST_PATTERN.search(line)
            if match:
                # Not a test file at all, so ignore it.
                return
            line_num += 1
    # Invoke wren and run the test.
    proc = Popen([WREN_APP, path], stdout=PIPE, stderr=PIPE)
    (out, err) = proc.communicate()
    # Normalise Windows line endings before comparing.
    (out, err) = out.replace('\r\n', '\n'), err.replace('\r\n', '\n')
    fails = []
    # Validate that no unexpected errors occurred.
    if expect_return != 0 and err != '':
        lines = err.split('\n')
        if expect_runtime_error:
            # Make sure we got the right error.
            if lines[0] != expect_runtime_error:
                fails.append('Expected runtime error "' + expect_runtime_error +
                             '" and got:')
                fails.append(lines[0])
            # Make sure the stack trace has the right line.
            match = STACK_TRACE_PATTERN.search(lines[1])
            if not match:
                fails.append('Expected stack trace and got:')
                fails.append(lines[1])
            else:
                stack_line = int(match.group(1))
                if stack_line != expect_runtime_error_line:
                    fails.append('Expected runtime error on line ' +
                                 str(expect_runtime_error_line) + ' but was on line ' +
                                 str(stack_line))
        else:
            lines = err.split('\n')
            while len(lines) > 0:
                line = lines.pop(0)
                match = ERROR_PATTERN.search(line)
                if match:
                    # NOTE(review): float() against a list of ints works via
                    # numeric equality (1.0 == 1), but int() would be clearer.
                    if float(match.group(1)) not in expect_error:
                        fails.append('Unexpected error:')
                        fails.append(line)
                elif line != '':
                    fails.append('Unexpected output on stderr:')
                    fails.append(line)
    else:
        # No stderr (or none expected): every expected error is now missing.
        for line in expect_error:
            fails.append('Expected error on line ' + str(line) + ' and got none.')
        if expect_runtime_error:
            fails.append('Expected runtime error "' + expect_runtime_error +
                         '" and got none.')
    # Validate the exit code.
    if proc.returncode != expect_return:
        fails.append('Expected return code {0} and got {1}. Stderr:'
                     .format(expect_return, proc.returncode))
        fails += err.split('\n')
    else:
        # Validate the output.
        expect_index = 0
        # Remove the trailing last empty line.
        out_lines = out.split('\n')
        if out_lines[-1] == '':
            del out_lines[-1]
        for line in out_lines:
            if expect_index >= len(expect_output):
                fails.append('Got output "{0}" when none was expected.'.format(line))
            elif expect_output[expect_index][0] != line:
                fails.append('Expected output "{0}" on line {1} and got "{2}".'.
                             format(expect_output[expect_index][0],
                                    expect_output[expect_index][1], line))
            expect_index += 1
        # Any expectations left over were never produced.
        while expect_index < len(expect_output):
            fails.append('Missing expected output "{0}" on line {1}.'.
                         format(expect_output[expect_index][0],
                                expect_output[expect_index][1]))
            expect_index += 1
    # Display the results.
    if len(fails) == 0:
        passed += 1
        #print color.GREEN + 'PASS' + color.DEFAULT + ': ' + path
    else:
        failed += 1
        print_line(color.RED + 'FAIL' + color.DEFAULT + ': ' + path)
        print
        for fail in fails:
            print ' ', color.PINK + fail + color.DEFAULT
        print
# Run every test under the test directory, then print a summary (Python 2).
walk(TEST_DIR, run_test)
print_line()
if failed == 0:
    print 'All ' + color.GREEN + str(passed) + color.DEFAULT + ' tests passed.'
else:
    print (color.GREEN + str(passed) + color.DEFAULT + ' tests passed. ' +
           color.RED + str(failed) + color.DEFAULT + ' tests failed.')
# Report skip counts grouped by skip reason.
for key in sorted(skipped.keys()):
    print ('Skipped ' + color.YELLOW + str(skipped[key]) + color.DEFAULT +
           ' tests: ' + key)
# Non-zero exit code signals failure to CI.
if failed != 0:
    sys.exit(1)
"content_hash": "8e8eb00ddb9d6b537d8155a7616ab82a",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 86,
"avg_line_length": 29.957264957264957,
"alnum_prop": 0.6042796005706134,
"repo_name": "daimajia/wren",
"id": "ba3a6562a386684135cbc900b3cb1aac9a887e8b",
"size": "7029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "212339"
},
{
"name": "C++",
"bytes": "21440"
},
{
"name": "CSS",
"bytes": "5648"
},
{
"name": "Lua",
"bytes": "5473"
},
{
"name": "Makefile",
"bytes": "1568"
},
{
"name": "Python",
"bytes": "43154"
},
{
"name": "Ruby",
"bytes": "4513"
}
],
"symlink_target": ""
} |
from eps2pdf_converter import psfrag_replace
import sys
from PyQt4 import QtCore, QtGui
# from eps2pdf_converter import psfrag_replace
class MainWindow(QtGui.QMainWindow):
    """Main window of the eps2PDF converter GUI (PyQt4, Python 2).

    Lets the user pick an .eps figure, provide psfrag replacements (typed in,
    or loaded from a companion ``<figure>.psfrags`` file), and run the
    eps -> pdf conversion via ``psfrag_replace``.
    """

    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        self.resize(350, 250)
        self.setWindowTitle('eps2PDF Converter')
        # Show the status bar
        self.statusBar()
        # Central area layout: file chooser row, psfrag editor, buttons row.
        centralWindow = QtGui.QWidget()
        self.centralVbox = QtGui.QVBoxLayout()
        centralWindow.setLayout(self.centralVbox)
        # File Input
        self.set_input_file_part()
        # Psfrag Replacements
        self.set_psfragreplacements_part()
        # Ok/Quit buttons part
        self.set_convert_quit_buttons()
        # Set the central widget
        self.setCentralWidget(centralWindow)

    def set_input_file_part(self):
        """
        Set-up the filename label, line edit and file chooser button
        """
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(QtGui.QLabel('Input File'))
        self.fileNameText = QtGui.QLineEdit()
        hbox.addWidget(self.fileNameText)
        # Re-evaluate the psfrag source when the user finishes typing a path.
        self.connect(self.fileNameText, QtCore.SIGNAL('editingFinished()'), self.comboBoxChanged)
        fileButton = QtGui.QPushButton('Browse')
        hbox.addWidget(fileButton)
        self.fileChooserDialog = QtGui.QFileDialog(self)
        self.connect(fileButton, QtCore.SIGNAL('clicked()'), self.show_dialog)
        self.centralVbox.addLayout(hbox)

    def set_psfragreplacements_part(self):
        """Build the psfrag-replacements editor and its source combo box."""
        # Text area with the psfrag replacements (from the user or from the file)
        self.inputPsfragReplacements = QtGui.QPlainTextEdit()
        # Stores the user's psfrag text while the widget displays the file's
        # content, so that the user input can be restored later.
        self.oldPsfragReplacementText = ""
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(QtGui.QLabel('Psfrag Replacements:'))
        self.comboBox = QtGui.QComboBox()
        self.comboBox.addItem("User Input")
        self.comboBox.addItem("File Input")
        hbox.addWidget(self.comboBox)
        self.updatePsfragReplacementsFromUser()
        self.connect(self.comboBox, QtCore.SIGNAL('currentIndexChanged(int)'), self.comboBoxChanged)
        self.centralVbox.addLayout(hbox)
        self.centralVbox.addWidget(self.inputPsfragReplacements)

    def comboBoxChanged(self):
        """Switch the psfrag source: index 0 = user input, 1 = file input."""
        currentIndex = self.comboBox.currentIndex()
        if(currentIndex == 0):
            self.updatePsfragReplacementsFromUser()
        if(currentIndex == 1):
            self.updatePsfragReplacementsFromFile()

    def updatePsfragReplacementsFromUser(self):
        """
        When the combobox changes to the User Input this function changes
        the text area with the psfrag replacements accordingly also
        restoring any previously text input by the user.
        """
        self.statusBar().showMessage("Using psfrag replacements from user input", 3000)
        self.inputPsfragReplacements.setPlainText(self.oldPsfragReplacementText)
        self.inputPsfragReplacements.setEnabled(True)

    def updatePsfragReplacementsFromFile(self):
        """
        When the combobox changes to the File Input this function changes
        the text area with the psfrag replacements accordingly.
        """
        self.inputPsfragReplacements.setEnabled(False)
        self.oldPsfragReplacementText = self.inputPsfragReplacements.toPlainText()
        (figDir, figShortName, figExtension) = self.getCanonizeFigName()
        # Name of the file with psfrag replacements (if it exists)
        psfragFileName = figDir.absolutePath() + "/" + figShortName + ".psfrags"
        psfragFile = QtCore.QFile(psfragFileName)
        # This will have the psfrag replacements text at the end of the
        # function
        psfragText = ""
        if(psfragFile.exists()):
            psfragFile.open(QtCore.QIODevice.ReadOnly | QtCore.QIODevice.Text)
            psfragText = psfragFile.readAll()
            psfragFile.close()
            # BUGFIX: the message used the QFile object with %s (printing an
            # object repr); show the actual file name instead.
            self.statusBar().showMessage("Using psfrag replacements from file %s" % psfragFileName, 3000)
        else:
            # BUGFIX: this branch referenced the undefined name ``figName``
            # and raised NameError whenever the .psfrags file was missing.
            self.statusBar().showMessage("File %s does not exist" % psfragFileName, 3000)
        # TODO: Change QtCore.QString(psfragText) to something that can
        # handle accentuated characters
        self.inputPsfragReplacements.setPlainText(QtCore.QString.fromUtf8(psfragText))

    def show_dialog(self):
        """Open a file chooser and copy the selection into the filename box."""
        text = QtGui.QFileDialog.getOpenFileName(
            self,
            filter="Eps files (*.eps);;All Files (*)")
        # If the user cancel the dialog text will be None and we don't
        # change whatever is in fileNameText
        if text:
            self.fileNameText.setText(text)

    def set_convert_quit_buttons(self):
        """Add the Convert and Quit buttons and wire up their signals."""
        hbox = QtGui.QHBoxLayout()
        okButton = QtGui.QPushButton("&Convert")
        quitButton = QtGui.QPushButton("&Quit")
        hbox.addWidget(okButton)
        hbox.addWidget(quitButton)
        self.centralVbox.addLayout(hbox)
        # Connect the clicked signals
        self.connect(quitButton, QtCore.SIGNAL('clicked()'), QtCore.SLOT("close()"))
        self.connect(okButton, QtCore.SIGNAL('clicked()'), self.convert)

    def getCanonizeFigName(self):
        """
        Separate the path from the file name and also separates the file
        name from the file extension.

        Returns (QDir of the figure, base name without extension, extension).
        """
        figFullName = self.fileNameText.text()
        fi = QtCore.QFileInfo(figFullName)
        figDir = fi.absoluteDir()
        figShortName = fi.baseName()
        figExtension = fi.completeSuffix()
        return (figDir, figShortName, figExtension)

    def convert(self):
        """Run psfrag_replace on the chosen figure with the current psfrags."""
        (figDir, figShortName, figExtension) = self.getCanonizeFigName()
        psfrag = self.inputPsfragReplacements.toPlainText()
        # simplified() collapses whitespace, so this warns on blank-only text.
        if psfrag.simplified().isEmpty():
            self.statusBar().showMessage("Warning: Psfrag Replacements Empty", 3000)
        figNameNoExt = figDir.absolutePath() + "/" + figShortName
        exit_code = psfrag_replace(str(figNameNoExt), str(psfrag.toUtf8()))
        if(exit_code != 0):
            self.statusBar().showMessage("Conversion problems", 3000)
        else:
            self.statusBar().showMessage("Conversion Finished", 3000)
if __name__ == '__main__':
    # Standard PyQt4 bootstrap: create the application, show the main
    # window, and hand control to the Qt event loop.
    app = QtGui.QApplication(sys.argv)
    main = MainWindow()
    main.show()
    sys.exit(app.exec_())
# eps2pdf_converter ends here
| {
"content_hash": "b2ca1b816bc68ebe8a246227484274c0",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 109,
"avg_line_length": 38.89823008849557,
"alnum_prop": 0.648959162780116,
"repo_name": "darcamo/epsfrag2pdf",
"id": "42e898d3eb3547a572593728007c3a25fee66c4b",
"size": "8838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eps2pdf_converter_gui.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25265"
},
{
"name": "TeX",
"bytes": "21"
}
],
"symlink_target": ""
} |
from compiler.compiler import LambdaCompiler
def main():
    """Compile 'input.txt' with LambdaCompiler, writing the result to
    'output.py'."""
    # BUGFIX: the original opened the file and never closed it; the
    # with-statement guarantees the handle is released.
    with open('input.txt', 'r') as source:
        compiler = LambdaCompiler(source)
        compiler.perform('output.py')


if __name__ == "__main__":
    main()
| {
"content_hash": "5580d6e9c6369b480c92f33d73ed0735",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6205128205128205,
"repo_name": "felipewaku/compiladores-p2",
"id": "f1eb1cfbd059baa144867bfced5658ac57b88e52",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lambda_compiler/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28324"
}
],
"symlink_target": ""
} |
from pprint import pprint
import matplotlib.pyplot as plt
from common import Fft
def fft_padded_plot(x):
    """Plot the magnitude spectrum of x using a zero-padded FFT and save it
    to 'q1_fft_padded.png'. Fft is a project helper (see common module)."""
    X = Fft(x, sample_rate=8192, padded=True)
    plt.figure()
    # X.hz: frequency axis, X.abs: spectrum magnitude (per the Fft helper).
    plt.plot(X.hz, X.abs)
    plt.xlabel('Frequency (Hz)')
    plt.ylabel('|H|')
    plt.savefig('q1_fft_padded.png')
def fft_not_padded_plot(x):
    """Plot the magnitude spectrum of x without zero padding and save it to
    'q1_fft_not_padded.png'."""
    X = Fft(x, sample_rate=8192, padded=False)
    plt.figure()
    plt.plot(X.hz, X.abs)
    plt.xlabel('Frequency (Hz)')
    plt.ylabel('|H|')
    plt.savefig('q1_fft_not_padded.png')
def time_plot(x):
    """Plot the raw signal x against its sample index and save it to
    'q1_time.png'."""
    plt.figure()
    plt.plot(x)
    plt.xlabel('Sample')
    plt.ylabel('x')
    plt.savefig('q1_time.png')
def fft_time():
    """ Runs scipy's fft a lot of times to estimate its execution time
    As we are running 10**6 times and the timeit function returns
    the cumulate time, our result is the average time in microseconds,
    once we would divide by 10**6 to get the average time and multiple
    it back by 10**6 to get the time in microseconds :)
    """
    import timeit
    # Setup strings must be flush-left: timeit compiles them as module code.
    setup = """
from scipy.fftpack import fft
from dados import x
import numpy as np
x = np.array(x)
"""
    # Appends one zero, making the length (likely) a prime -- worst case
    # for the FFT algorithm.
    extra_element = """
x = np.append(x, 0)
"""
    # Pads the signal up to the next power of two -- best case for the FFT.
    padding = """
n = int(2**np.ceil(np.log2(len(x))))
x = np.pad(x, (0, n-len(x)%n), 'constant')
"""
    return {
        'not-padded': timeit.timeit('fft(x)', setup=setup, number=10**6),
        'prime-padded': timeit.timeit('fft(x)', setup=setup+extra_element, number=10**6),
        '2power-padded': timeit.timeit('fft(x)', setup=setup+padding, number=10**6),
    }
if __name__ == '__main__':
    # The sample signal lives in the project-local ``dados`` module.
    from dados import x
    fft_padded_plot(x)
    fft_not_padded_plot(x)
    time_plot(x)
    # Print the FFT timing comparison (not-padded / prime / power-of-two).
    pprint(fft_time())
| {
"content_hash": "29e62cf9545411fedbdfae9a31d0ed8a",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 93,
"avg_line_length": 25.602941176470587,
"alnum_prop": 0.5985066053991959,
"repo_name": "viniciusd/DCO1008---Digital-Signal-Processing",
"id": "516e1dfe050ac03edd64db2d504b0e56d14354a5",
"size": "1741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projeto2/question1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21977"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import *
# Register every portal model with the default admin site, preserving the
# original registration order.
for _model in (Guide, Category, Document, News, Event, EventType, Field,
               Catalog, CatalogFile, Product):
    admin.site.register(_model)
| {
"content_hash": "8db07416c9b766dd6b305ac6a9d3addd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 32,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.8179190751445087,
"repo_name": "javierwilson/cacaomovilcom",
"id": "3c93a58cce1ffbe287436c403b5583c72fb32a30",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cacaomovilcom/portal/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10895"
},
{
"name": "HTML",
"bytes": "43388"
},
{
"name": "JavaScript",
"bytes": "529"
},
{
"name": "Python",
"bytes": "53621"
},
{
"name": "Shell",
"bytes": "3932"
}
],
"symlink_target": ""
} |
from sys import argv
from string import strip
from os import listdir,path
from optparse import OptionParser
from datetime import datetime
import tarfile
# Module metadata. BUGFIX: '__author__' was misspelled '_author__'
# (missing a leading underscore).
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Jesse Zaneveld", "Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
__status__ = "Development"

# NOTE(review): this string is not a real module docstring (it is not the
# module's first statement); kept in place for its descriptive value.
"""A parser for the KEGG 'ko' file
containing information on KEGG orthology groups and their associated pathways.
"""
def parse_ko_file(filepath,dir_prefix=None,debug = True):
    """Parse the NCBI KO file lines, and output several tab-delimited files

    filepath - the full filepath to the input KO file from KEGG
    dir_prefix - the directory to which tab-delimited output files will be saved.
    debug - if set to True, pring debugging output to the screen

    NOTE: this module is Python 2 (print statements, 'U' open mode). The
    record iterator ``parse_ko`` is defined elsewhere in this module.
    """
    lines = open(filepath,"U")
    # Output file names for each extracted relation.
    ko_gene_fname = 'ko_to_gene.tab'
    ko_fname = 'ko.tab'
    ko_pathway_fname = 'ko_to_pathway.tab'
    pathway_fname = 'pathway.tab'
    ko_cog_fname = 'ko_to_cog.tab'
    ko_cazy_fname = 'ko_to_cazy.tab'
    ko_go_fname = 'ko_to_go.tab'
    fnames = [ko_gene_fname, ko_fname, ko_pathway_fname,\
              pathway_fname, ko_cog_fname, ko_cazy_fname,\
              ko_go_fname]
    if dir_prefix:
        fnames = [dir_prefix + '/' + f for f in fnames]
    if debug:
        for res_fp in fnames:
            print "Outputting parsed info to: %s" %(res_fp)
    # NOTE(review): these handles are never explicitly closed; they are
    # released only when the process exits or they are garbage collected.
    ko_gene, ko, ko_pathway, pathway, ko_cog, ko_cazy, ko_go = \
        [open(i, 'w') for i in fnames]
    #figure out what fields we want (and get them), and get pathway data
    fields = ['ENTRY', 'NAME', 'DEFINITION']
    ko_to_pathway = {}
    for rec in parse_ko(lines):
        # One row per KO entry with the selected fields.
        ko.write('\t'.join([rec.get(f,'') for f in fields]))
        ko.write('\n')
        entry = rec['ENTRY']
        if 'GENES' not in rec:
            continue #apparently, some records don't have genes...
        genes = rec['GENES']
        # One ko_to_gene row per (entry, species:gene) pair.
        for species, gene_list in genes.items():
            for g in gene_list:
                ko_gene.write('%s\t%s:%s\n' % (entry, species.lower(), g))
        if 'CLASS' not in rec:
            continue #apparently they also lack classes...
        ko_to_pathway[entry] = rec['CLASS']
        # Cross-database links (COG / CAZy / GO), when present.
        dblinks = rec.get('DBLINKS', None)
        if dblinks:
            cogs = dblinks.get('COG', None)
            cazy = dblinks.get('CAZy', None)
            go = dblinks.get('GO', None)
            if cogs:
                for c in cogs:
                    ko_cog.write("%s\t%s\n" % (entry, c))
            if go:
                for g in go:
                    ko_go.write("%s\t%s\n" % (entry, g))
            if cazy:
                for c in cazy:
                    ko_cazy.write("%s\t%s\n" % (entry,c))
    #postprocess the ko_to_pathway data to find out what the pathway terms
    #are and to write them out into a join file
    max_terms = 10
    unique_recs = {} #will hold tuple(fields) -> unique_id
    curr_uid = 0
    # NOTE(review): this loop rebinds ``ko``, clobbering the ko.tab file
    # handle opened above (harmless here since writing to it is finished).
    for ko, classes in ko_to_pathway.items():
        for (id_, fields) in classes:
            if fields not in unique_recs:
                # First time this pathway term tuple is seen: assign an id
                # and write a pathway.tab row padded/truncated to max_terms.
                unique_recs[fields] = curr_uid
                fields_for_output = fields[:]
                if len(fields_for_output) > max_terms:
                    fields_for_output = fields_for_output[:max_terms]
                elif len(fields_for_output) < max_terms:
                    fields_for_output += \
                        ('',)*(max_terms - len(fields_for_output))
                pathway.write('\t'.join((str(curr_uid),str(id_)) +\
                    fields_for_output)+'\n')
                curr_uid += 1
            uid = unique_recs[fields]
            ko_pathway.write(str(ko)+ '\t'+ str(uid) + '\n')
def make_tab_delimited_line_parser(columns_to_convert):
    """Generate a function that parses a tab-delimited line.

    columns_to_convert: a list of column indexes to convert into integers
    by splitting on ':' and taking the second entry (e.g. to convert listings
    like GO:0008150 to 0008150 or ncbi-gi:14589889 to 14589889)"""
    def parse_tab_delimited_line(line):
        fields = line.split("\t")
        for index in columns_to_convert:
            fields[index] = fields[index].split(":")[1]
        return "\t".join(fields)
    # BUGFIX: the original built the dynamic docstring as a bare '%'
    # expression immediately after the literal, which Python discards;
    # assign it to __doc__ explicitly so the docstring actually exists.
    parse_tab_delimited_line.__doc__ = (
        "Parse a tab-delimited line taking only the second item of cols %s"
        % str(columns_to_convert))
    return parse_tab_delimited_line
def ko_default_parser(lines):
    """Parse a default-format KEGG KO field (e.g. NAME, DEFINITION).

    lines -- the space-separated lines belonging to one labeled field.

    Joins the stripped lines and returns everything after the field label.
    """
    joined = ' '.join([strip(ln) for ln in lines])
    return joined.split(None, 1)[1]
def ko_first_field_parser(lines):
    """Return only the first data field of a KEGG KO entry.

    For example, an ENTRY field like 'ENTRY  K01559  KO\n' yields
    'K01559'.  Newlines are stripped and all lines joined before
    splitting.
    """
    joined = ' '.join([strip(ln) for ln in lines])
    return joined.split()[1]
def delete_comments(line):
    """Return *line* with parenthesized comments removed.

    The surrounding parens are dropped as well; within each
    ')'-delimited chunk, everything from the first '(' onward is
    discarded.
    """
    kept = []
    for chunk in line.split(')'):
        open_at = chunk.find('(')
        kept.append(chunk if open_at == -1 else chunk[:open_at])
    return ''.join(kept)
def ko_colon_fields(lines, without_comments=True):
    """Convert colon-delimited KO lines (DBLINKS/GENES) to (key, values).

    Example:
        '        BXE: Bxe_B0037 Bxe_C0683\n' -> ('BXE', ['Bxe_B0037', ...])

    When without_comments is True, parenthesized comments are stripped
    before splitting.
    """
    merged = ' '.join([strip(ln) for ln in lines])
    if without_comments:
        merged = delete_comments(merged)
    label, remainder = merged.split(':', 1)
    return label, remainder.split()
def ko_colon_delimited_parser(lines, without_comments=True):
    """Parse 'LABEL  id: values' blocks into a dict of {id: [values]}.

    The data-column offset is inferred from the first line; a new
    sub-block starts whenever a non-space character appears at that
    offset.
    """
    header = lines[0]
    payload = header.split(None, 1)[1]
    offset = len(header) - len(payload)
    parsed = {}
    block = []
    for raw in lines:
        trimmed = raw[offset:]
        if trimmed[0] != ' ':
            # non-indented char at the data column: a new id-block begins
            if block:
                label, values = ko_colon_fields(block, without_comments)
                parsed[label] = values
                block = []
        block.append(trimmed)
    # flush the final block
    if block:
        label, values = ko_colon_fields(block, without_comments)
        parsed[label] = values
    return parsed
def _is_new_kegg_rec_group(prev, curr):
"""Check for irregular record group terminators"""
return curr[0].isupper() and not prev.endswith(';') and \
not curr.startswith('CoA biosynthesis') and not prev.endswith(' and') and \
not prev.endswith('-') and not prev.endswith(' in') and not \
prev.endswith(' type') and not prev.endswith('Bindng') and not \
prev.endswith('Binding')
def group_by_end_char(lines, end_char = ']', \
    is_new_rec=_is_new_kegg_rec_group):
    """Yields successive groups of lines that end with *end_char*.

    Because not every KEGG record actually carries the terminator, the
    is_new_rec(prev, curr) heuristic is also consulted to detect the
    start of a fresh record.  Any trailing partial group is yielded as
    well.
    """
    pending = []
    previous = ''
    for raw in lines:
        bare = raw.strip()
        # heuristic start-of-record: flush whatever we have collected
        if is_new_rec(previous, bare):
            if pending:
                yield pending
                pending = []
        if bare.endswith(end_char):
            # explicit terminator: emit the group including this line
            yield pending + [raw]
            pending = []
        else:
            pending.append(raw)
        previous = bare
    if pending:
        yield pending
def class_lines_to_fields(lines):
    """Convert one pathway of a KO CLASS definition to (id, fields).

    Returns (class_id or None, tuple of ';'-separated hierarchy terms).
    The class id is taken from a trailing '[PATH:koXXXXX]' bracket when
    present.
    """
    merged = ' '.join([strip(ln) for ln in lines])
    # drop the leading CLASS label when present
    if merged.startswith('CLASS'):
        merged = merged.split(None, 1)[1]
    # a trailing [...] carries the pathway id
    if merged.endswith(']'):
        merged, bracketed = merged.rsplit('[', 1)
        class_id = bracketed[:-1]
    else:
        class_id = None
    terms = [strip(term) for term in merged.split(';')]
    return class_id, tuple(terms)
def ko_class_parser(lines, without_comments='ignored'):
    """Yield (pathway_id, (terms...)) for each pathway in a CLASS block.

    CLASS declarations are multi-line, semicolon-delimited fields (each
    field a successive entry in the KEGG pathway hierarchy), normally
    ending in '[PATH:koXXXXX]'.  Terms are deliberately not consolidated
    here: every KO group carries its own CLASS declaration, so merging
    is left to a higher level.

    without_comments exists only for signature compatibility with the
    other field parsers and is ignored.
    """
    for pathway_lines in group_by_end_char(lines):
        yield class_lines_to_fields(pathway_lines)
def parse_ko(lines):
    """Yield one dict of parsed fields per KO record in *lines*.

    Field labels are dispatched to the parser appropriate for their
    format; unrecognized labels are silently skipped.
    """
    # Dispatch table: each known field label maps to its parser, which
    # lets similar entries share one implementation.
    field_parsers = {
        'NAME': ko_default_parser,
        'DEFINITION': ko_default_parser,
        'DBLINKS': ko_colon_delimited_parser,
        'GENES': ko_colon_delimited_parser,
        'ENTRY': ko_first_field_parser,
        'CLASS': lambda v: list(ko_class_parser(v)),
    }
    for rec in ko_record_iterator(lines):
        grouped = ko_record_splitter(rec)
        parsed = {}
        for label, body in grouped.items():
            parser = field_parsers.get(label)
            if parser is not None:
                parsed[label] = parser(body)
        yield parsed
#parse_ko: lightweight standalone ko parser
def ko_record_iterator(lines):
    """Yield lists of lines for successive KO records ('///'-delimited).

    A trailing record without a closing '///' is yielded too.
    """
    record = []
    for raw in lines:
        if raw.startswith('///') and record:
            yield record
            record = []
        else:
            record.append(raw)
    if record:
        yield record
def ko_record_splitter(lines):
    """Split one KO record's lines into a dict of {field_label: [lines]}.

    A new group starts at every line whose first character is not a
    space; indented lines are continuations of the current group.  The
    label is the first whitespace-delimited token of the group's first
    line.

    Note: assumes every line is non-empty (line[0] raises on '').
    """
    result = {}
    curr_label = None
    curr = []
    for line in lines:
        if line[0] != ' ':
            if curr_label is not None:
                result[curr_label] = curr
            # Annoyingly, REFERENCE lines can be blank after the label
            # (no PMID) but still carry author/title info on following
            # lines, so the label may be the only token present.
            # (The original also unpacked the rest of the line into an
            # unused 'curr_line' and kept an unused counter 'i'; both
            # have been removed -- only the label is needed.)
            curr_label = line.split(None, 1)[0]
            curr = [line]
        else:
            curr.append(line)
    if curr:
        result[curr_label] = curr
    return result
if __name__ == '__main__':
    # Command-line usage: kegg_ko.py <ko_flat_file> <output_dir>
    from sys import argv
    filename = argv[1]  # path to the KEGG KO flat file to parse
    out_dir = argv[2]   # directory prefix for the generated tab files
    # parse_ko_file is defined earlier in this module; debug=True makes
    # it report progress while writing the join files.
    parse_ko_file(filename, \
        dir_prefix = out_dir, \
        debug = True)
| {
"content_hash": "a57e3e8733adde0a1caa9aea78a93921",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 83,
"avg_line_length": 33.953216374269005,
"alnum_prop": 0.5788839131932484,
"repo_name": "sauloal/cnidaria",
"id": "add232f58dc333d5dbb4b0a40c65d46eb02d31f6",
"size": "11635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/venv/lib/python2.7/site-packages/cogent/parse/kegg_ko.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1696790"
},
{
"name": "C++",
"bytes": "3035466"
},
{
"name": "CSS",
"bytes": "20306"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "32478"
},
{
"name": "HTML",
"bytes": "19658"
},
{
"name": "JavaScript",
"bytes": "250616"
},
{
"name": "Jupyter Notebook",
"bytes": "8401292"
},
{
"name": "M4",
"bytes": "3905"
},
{
"name": "Makefile",
"bytes": "177650"
},
{
"name": "Objective-C",
"bytes": "1701"
},
{
"name": "Python",
"bytes": "28122291"
},
{
"name": "R",
"bytes": "86108"
},
{
"name": "Shell",
"bytes": "676123"
}
],
"symlink_target": ""
} |
from model.contact import Contact
from random import randrange
def test_delete_some_contact(app):
if app.contact.count() == 0:
app.contact(Contact(Firstname="First name", Lastname="Last name", address="ddsdsf", homephone="NIckname", mobilephone="Corpo",
secondaryphone="698998657",
email="wcia@radiowy.net"))
old_contacts = app.contact.get_contact_list()
index = randrange(len(old_contacts))
app.contact.delete_contact_by_index(index)
new_contacts = app.contact.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts[index:index+1] = []
assert old_contacts == new_contacts | {
"content_hash": "99be3423f728a59f24cbf47259993182",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 134,
"avg_line_length": 42.0625,
"alnum_prop": 0.6775631500742942,
"repo_name": "erybak90/Szkolenie-python",
"id": "faf57db1ec3be44d4bd5da365f970f882033b816",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_contact/test_del_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28461"
}
],
"symlink_target": ""
} |
# Auto-generated pyaf model-control test: build one Ozone model using
# the Fisher transformation, MovingMedian trend, month-of-year
# seasonality and no autoregressive component.
import tests.model_control.test_ozone_custom_models_enabled as testmod

testmod.build_model( ['Fisher'] , ['MovingMedian'] , ['Seasonal_MonthOfYear'] , ['NoAR'] );
"content_hash": "cd617ef1790155a75406c73cf0684d2d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 91,
"avg_line_length": 41,
"alnum_prop": 0.7195121951219512,
"repo_name": "antoinecarme/pyaf",
"id": "80ca0a911bce799bf8d4e1690dd952309ab491a7",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_MonthOfYear_NoAR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import requests
class Page(object):
    """A fetched web page: url, raw source, title and extracted body."""

    def __init__(self, url='', src='', title=''):
        self.url = url
        self.src = src
        self.title = title
        self.body = ''

    def __str__(self):
        # url on the first line, raw source below it
        return '\n'.join((self.url, self.src))

    def to_dict(self):
        """Return a plain-dict view suitable for serialization."""
        return {
            'url': self.url,
            'src': self.src,
            'title': self.title,
            'body': self.body,
        }
class SearchResult(object):
    """A query string plus the list of Page-like results it produced."""

    def __init__(self, *args, **kwargs):
        self.query = ''
        self.results = []

    def __unicode__(self):
        lines = [r.title + ' - ' + r.url for r in self.results]
        return u'\n'.join(lines)

    def __str__(self):
        # Python 2 idiom: delegate to __unicode__, then encode as UTF-8.
        return unicode(self).encode('utf8')

    def to_dict(self):
        """Return a plain-dict view with each result dict-ified."""
        return {
            'query': self.query,
            'results': [r.to_dict() for r in self.results],
        }
| {
"content_hash": "6f6204ca6002089801fd4f42f8ffd480",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 21.651162790697676,
"alnum_prop": 0.4865735767991407,
"repo_name": "ykakihara/nlp-hackathon",
"id": "b55afa792d46f91717e980c01c35bda15efc8424",
"size": "931",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "20150823_seo_writing/kakihara/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "1772"
},
{
"name": "Python",
"bytes": "13816"
}
],
"symlink_target": ""
} |
"""This is the "nester.py" module and it provides one function called print_lol()
which prints lists that may or may not include nested lists."""
import sys
def print_lol(the_list, indent=False, level=0, fh=sys.stdout):
    """Recursively print every item of a (possibly nested) list.

    the_list -- any Python list, possibly containing nested lists
    indent   -- when True, prefix each item with one tab per nesting level
    level    -- current nesting depth, used for the indentation
    fh       -- file-like object to write to (defaults to stdout)

    Each data item is printed on its own line.
    """
    for item in the_list:
        if isinstance(item, list):
            print_lol(item, indent, level + 1, fh)
            continue
        if indent:
            print("\t" * level, end='', file=fh)
        print(item, file=fh)
"""
Sample Data.
movies = ["The Holy Grail", 1975, "Terry Jones & Terry Gilliam", 91, ["Graham Chapman", ["Michael Palin", "John Cleese", "Terry Gilliam", "Eric Idle & Terry Jones"]]]
"""
| {
"content_hash": "18485bff73787d22de3f7cc1f3a1959c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 166,
"avg_line_length": 36.42307692307692,
"alnum_prop": 0.6251319957761352,
"repo_name": "byplacebo/head-first-python",
"id": "bcd3793df8515517ae85cb2847d307578221683d",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "4th/nester/nester.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14994"
}
],
"symlink_target": ""
} |
import email
import imaplib
import os.path
import re
from airflow import LoggingMixin, AirflowException
from airflow.hooks.base_hook import BaseHook
class ImapHook(BaseHook):
    """
    This hook connects to a mail server by using the imap protocol.

    :param imap_conn_id: The connection id that contains the information used to authenticate the client.
    :type imap_conn_id: str
    """

    def __init__(self, imap_conn_id='imap_default'):
        super().__init__(imap_conn_id)
        self.conn = self.get_connection(imap_conn_id)
        # SSL connection is established here; login happens in __enter__.
        self.mail_client = imaplib.IMAP4_SSL(self.conn.host)

    def __enter__(self):
        # Context-manager protocol: login on entry so callers can use
        # 'with ImapHook() as hook: ...'.
        self.mail_client.login(self.conn.login, self.conn.password)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.mail_client.logout()

    def has_mail_attachment(self, name, mail_folder='INBOX', check_regex=False):
        """
        Checks the mail folder for mails containing attachments with the given name.

        :param name: The name of the attachment that will be searched for.
        :type name: str
        :param mail_folder: The mail folder where to look at.
        :type mail_folder: str
        :param check_regex: Checks the name for a regular expression.
        :type check_regex: bool
        :returns: True if there is an attachment with the given name and False if not.
        :rtype: bool
        """
        # latest_only=True: one match is enough for an existence check.
        mail_attachments = self._retrieve_mails_attachments_by_name(name,
                                                                    mail_folder,
                                                                    check_regex,
                                                                    latest_only=True)
        return len(mail_attachments) > 0

    def retrieve_mail_attachments(self,
                                  name,
                                  mail_folder='INBOX',
                                  check_regex=False,
                                  latest_only=False,
                                  not_found_mode='raise'):
        """
        Retrieves mail's attachments in the mail folder by its name.

        :param name: The name of the attachment that will be downloaded.
        :type name: str
        :param mail_folder: The mail folder where to look at.
        :type mail_folder: str
        :param check_regex: Checks the name for a regular expression.
        :type check_regex: bool
        :param latest_only: If set to True it will only retrieve
            the first matched attachment.
        :type latest_only: bool
        :param not_found_mode: Specify what should happen if no attachment has been found.
            Supported values are 'raise', 'warn' and 'ignore'.
            If it is set to 'raise' it will raise an exception,
            if set to 'warn' it will only print a warning and
            if set to 'ignore' it won't notify you at all.
        :type not_found_mode: str
        :returns: a list of tuple each containing the attachment filename and its payload.
        :rtype: a list of tuple
        """
        mail_attachments = self._retrieve_mails_attachments_by_name(name,
                                                                    mail_folder,
                                                                    check_regex,
                                                                    latest_only)
        if not mail_attachments:
            self._handle_not_found_mode(not_found_mode)
        return mail_attachments

    def download_mail_attachments(self,
                                  name,
                                  local_output_directory,
                                  mail_folder='INBOX',
                                  check_regex=False,
                                  latest_only=False,
                                  not_found_mode='raise'):
        """
        Downloads mail's attachments in the mail folder by its name to the local directory.

        :param name: The name of the attachment that will be downloaded.
        :type name: str
        :param local_output_directory: The output directory on the local machine
            where the files will be downloaded to.
        :type local_output_directory: str
        :param mail_folder: The mail folder where to look at.
        :type mail_folder: str
        :param check_regex: Checks the name for a regular expression.
        :type check_regex: bool
        :param latest_only: If set to True it will only download
            the first matched attachment.
        :type latest_only: bool
        :param not_found_mode: Specify what should happen if no attachment has been found.
            Supported values are 'raise', 'warn' and 'ignore'.
            If it is set to 'raise' it will raise an exception,
            if set to 'warn' it will only print a warning and
            if set to 'ignore' it won't notify you at all.
        :type not_found_mode: str
        """
        mail_attachments = self._retrieve_mails_attachments_by_name(name,
                                                                    mail_folder,
                                                                    check_regex,
                                                                    latest_only)
        if not mail_attachments:
            self._handle_not_found_mode(not_found_mode)
        self._create_files(mail_attachments, local_output_directory)

    def _handle_not_found_mode(self, not_found_mode):
        # Maps the configured mode to its reaction; an invalid mode is
        # only logged as an error, it does not raise.
        if not_found_mode == 'raise':
            raise AirflowException('No mail attachments found!')
        elif not_found_mode == 'warn':
            self.log.warning('No mail attachments found!')
        elif not_found_mode == 'ignore':
            pass  # Do not notify if the attachment has not been found.
        else:
            self.log.error('Invalid "not_found_mode" %s', not_found_mode)

    def _retrieve_mails_attachments_by_name(self, name, mail_folder, check_regex, latest_only):
        # Walks mails newest-first and collects matching attachments;
        # with latest_only, stops at the first mail that has any match.
        all_matching_attachments = []
        self.mail_client.select(mail_folder)
        for mail_id in self._list_mail_ids_desc():
            response_mail_body = self._fetch_mail_body(mail_id)
            matching_attachments = self._check_mail_body(response_mail_body, name, check_regex, latest_only)
            if matching_attachments:
                all_matching_attachments.extend(matching_attachments)
                if latest_only:
                    break
        self.mail_client.close()
        return all_matching_attachments

    def _list_mail_ids_desc(self):
        # 'All' is an IMAP search criterion (IMAP keys are
        # case-insensitive); ids come back oldest-first, so reverse.
        result, data = self.mail_client.search(None, 'All')
        mail_ids = data[0].split()
        return reversed(mail_ids)

    def _fetch_mail_body(self, mail_id):
        result, data = self.mail_client.fetch(mail_id, '(RFC822)')
        mail_body = data[0][1]  # The mail body is always in this specific location
        mail_body_str = mail_body.decode('utf-8')
        return mail_body_str

    def _check_mail_body(self, response_mail_body, name, check_regex, latest_only):
        # Returns the matches, or None when the mail has no attachments
        # (falsy either way for the caller's truth test).
        mail = Mail(response_mail_body)
        if mail.has_attachments():
            return mail.get_attachments_by_name(name, check_regex, find_first=latest_only)

    def _create_files(self, mail_attachments, local_output_directory):
        # Refuses symlinks and path-escaping names before writing.
        for name, payload in mail_attachments:
            if self._is_symlink(name):
                self.log.error('Can not create file because it is a symlink!')
            elif self._is_escaping_current_directory(name):
                self.log.error('Can not create file because it is escaping the current directory!')
            else:
                self._create_file(name, payload, local_output_directory)

    def _is_symlink(self, name):
        # IMPORTANT NOTE: os.path.islink is not working for windows symlinks
        # See: https://stackoverflow.com/a/11068434
        return os.path.islink(name)

    def _is_escaping_current_directory(self, name):
        return '../' in name

    def _correct_path(self, name, local_output_directory):
        # Joins directory and filename with exactly one '/' between them.
        return local_output_directory + name if local_output_directory.endswith('/') \
            else local_output_directory + '/' + name

    def _create_file(self, name, payload, local_output_directory):
        file_path = self._correct_path(name, local_output_directory)
        with open(file_path, 'wb') as file:
            file.write(payload)
class Mail(LoggingMixin):
    """
    This class simplifies working with mails returned by the imaplib client.

    :param mail_body: The mail body of a mail received from imaplib client.
    :type mail_body: str
    """

    def __init__(self, mail_body):
        super().__init__()
        self.mail = email.message_from_string(mail_body)

    def has_attachments(self):
        """
        Checks whether the mail carries any attachments.

        :returns: True if it has attachments and False if not.
        :rtype: bool
        """
        return self.mail.get_content_maintype() == 'multipart'

    def get_attachments_by_name(self, name, check_regex, find_first=False):
        """
        Gets all attachments by name for the mail.

        :param name: The name of the attachment to look for.
        :type name: str
        :param check_regex: Checks the name for a regular expression.
        :type check_regex: bool
        :param find_first: If set to True it will only find the first match and then quit.
        :type find_first: bool
        :returns: a list of tuples each containing name and payload
            where the attachments name matches the given name.
        :rtype: list of tuple
        """
        matches = []
        for part in self.mail.walk():
            candidate = MailPart(part)
            if not candidate.is_attachment():
                continue
            if check_regex:
                matched = candidate.has_matching_name(name)
            else:
                matched = candidate.has_equal_name(name)
            if matched:
                file_name, file_payload = candidate.get_file()
                self.log.info('Found attachment: {}'.format(file_name))
                matches.append((file_name, file_payload))
                if find_first:
                    break
        return matches
class MailPart:
    """
    This class is a wrapper for a Mail object's part and gives it more features.

    :param part: The mail part in a Mail object.
    :type part: any
    """

    def __init__(self, part):
        self.part = part

    def is_attachment(self):
        """
        Checks if the part is a valid mail attachment.

        :returns: True if it is an attachment and False if not.
        :rtype: bool
        """
        not_container = self.part.get_content_maintype() != 'multipart'
        return not_container and self.part.get('Content-Disposition')

    def has_matching_name(self, name):
        """
        Checks if the given name matches the part's name.

        :param name: The name to look for.
        :type name: str
        :returns: True if it matches the name (including regular expression).
        :rtype: tuple
        """
        return re.match(name, self.part.get_filename())

    def has_equal_name(self, name):
        """
        Checks if the given name is equal to the part's name.

        :param name: The name to look for.
        :type name: str
        :returns: True if it is equal to the given name.
        :rtype: bool
        """
        return self.part.get_filename() == name

    def get_file(self):
        """
        Gets the file including name and payload.

        :returns: the part's name and payload.
        :rtype: tuple
        """
        return self.part.get_filename(), self.part.get_payload(decode=True)
| {
"content_hash": "cdde0bf27c65a335c5c83fef6bfbc113",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 108,
"avg_line_length": 40.178451178451176,
"alnum_prop": 0.5595407692952317,
"repo_name": "r39132/airflow",
"id": "5b441a153faff99317dc101897b63371c1b9507f",
"size": "12745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/imap_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
} |
"""Find attributes of a file other than its name.
"""
import os.path
import time
print 'File :', __file__
print 'Access time :', time.ctime(os.path.getatime(__file__))
print 'Modified time:', time.ctime(os.path.getmtime(__file__))
print 'Change time :', time.ctime(os.path.getctime(__file__))
print 'Size :', os.path.getsize(__file__) | {
"content_hash": "be8507315f7ca94b93adce44c62d52b2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 32.18181818181818,
"alnum_prop": 0.6384180790960452,
"repo_name": "Akagi201/learning-python",
"id": "651fd54c1e69e419a839663daff6625d4880c2d0",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ospath/ospath_properties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "125"
},
{
"name": "CSS",
"bytes": "82315"
},
{
"name": "HTML",
"bytes": "16738"
},
{
"name": "JavaScript",
"bytes": "253132"
},
{
"name": "Jupyter Notebook",
"bytes": "3666"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "336950"
},
{
"name": "Rich Text Format",
"bytes": "49342"
},
{
"name": "Shell",
"bytes": "4498"
}
],
"symlink_target": ""
} |
import re
import sys
import time
from collections import deque
from datetime import datetime, timedelta
from logging import Logger
from threading import Event, Thread
from typing import Dict, Generator, Optional
from botocore.exceptions import ClientError
from botocore.waiter import Waiter
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, XCom
from airflow.providers.amazon.aws.exceptions import ECSOperatorError
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.typing_compat import Protocol, runtime_checkable
from airflow.utils.session import provide_session
def should_retry(exception: Exception):
    """Check if exception is related to ECS resource quota (CPU, MEM)."""
    if not isinstance(exception, ECSOperatorError):
        return False
    quota_reasons = ('RESOURCE:MEMORY', 'RESOURCE:CPU')
    # Retry if any reported failure mentions a CPU/memory quota reason.
    for failure in exception.failures:
        for quota_reason in quota_reasons:
            if quota_reason in failure['reason']:
                return True
    return False
@runtime_checkable
class ECSProtocol(Protocol):
    """
    A structured Protocol for ``boto3.client('ecs')``. This is used for type hints on
    :py:meth:`.ECSOperator.client`.

    .. seealso::

        - https://mypy.readthedocs.io/en/latest/protocols.html
        - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
    """

    # These are typing stubs only ('...' bodies): the real implementations
    # live on the boto3 client; each docstring links its API reference.

    def run_task(self, **kwargs) -> Dict:
        """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task"""  # noqa: E501
        ...

    def get_waiter(self, x: str) -> Waiter:
        """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.get_waiter"""  # noqa: E501
        ...

    def describe_tasks(self, cluster: str, tasks) -> Dict:
        """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_tasks"""  # noqa: E501
        ...

    def stop_task(self, cluster, task, reason: str) -> Dict:
        """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.stop_task"""  # noqa: E501
        ...

    def describe_task_definition(self, taskDefinition: str) -> Dict:
        """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_task_definition"""  # noqa: E501
        ...

    def list_tasks(self, cluster: str, launchType: str, desiredStatus: str, family: str) -> Dict:
        """https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.list_tasks"""  # noqa: E501
        ...
class ECSTaskLogFetcher(Thread):
    """
    Fetches Cloudwatch log events with specific interval as a thread
    and sends the log events to the info channel of the provided logger.
    """

    def __init__(
        self,
        *,
        aws_conn_id: Optional[str] = 'aws_default',
        region_name: Optional[str] = None,
        log_group: str,
        log_stream_name: str,
        fetch_interval: timedelta,
        logger: Logger,
    ):
        super().__init__()
        # Event used to signal the fetcher thread to stop (see stop()).
        self._event = Event()
        self.fetch_interval = fetch_interval
        self.logger = logger
        self.log_group = log_group
        self.log_stream_name = log_stream_name
        self.hook = AwsLogsHook(aws_conn_id=aws_conn_id, region_name=region_name)

    def run(self) -> None:
        """Poll Cloudwatch until stopped, logging each new event once."""
        logs_to_skip = 0
        while not self.is_stopped():
            log_events = self._get_log_events(logs_to_skip)
            for log_event in log_events:
                self.logger.info(self._event_to_str(log_event))
                # remember how many events were already emitted so the
                # next poll does not repeat them
                logs_to_skip += 1
            time.sleep(self.fetch_interval.total_seconds())

    def _get_log_events(self, skip: int = 0) -> Generator:
        """Yield log events from the stream, skipping the first *skip*."""
        try:
            yield from self.hook.get_log_events(self.log_group, self.log_stream_name, skip=skip)
        except ClientError as error:
            if error.response['Error']['Code'] != 'ResourceNotFoundException':
                # BUG FIX: the original passed `error` as a positional
                # logging arg with no '%s' placeholder in the message,
                # which makes the logging machinery raise an internal
                # formatting error instead of showing the exception.
                self.logger.warning('Error on retrieving Cloudwatch log events: %s', error)
            yield from ()

    def _event_to_str(self, event: dict) -> str:
        """Format one Cloudwatch event as '[YYYY-mm-dd HH:MM:SS,mmm] msg'."""
        # Cloudwatch timestamps are in milliseconds since the epoch (UTC).
        event_dt = datetime.utcfromtimestamp(event['timestamp'] / 1000.0)
        formatted_event_dt = event_dt.strftime('%Y-%m-%d %H:%M:%S,%f')[:-3]
        message = event['message']
        return f'[{formatted_event_dt}] {message}'

    def get_last_log_messages(self, number_messages) -> list:
        """Return up to the last *number_messages* messages of the stream."""
        return [log['message'] for log in deque(self._get_log_events(), maxlen=number_messages)]

    def get_last_log_message(self) -> Optional[str]:
        """Return the very last log message, or None if the stream is empty."""
        try:
            return self.get_last_log_messages(1)[0]
        except IndexError:
            return None

    def is_stopped(self) -> bool:
        return self._event.is_set()

    def stop(self):
        """Ask the fetcher thread to terminate after its current poll."""
        self._event.set()
class ECSOperator(BaseOperator):
"""
Execute a task on AWS ECS (Elastic Container Service)
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ECSOperator`
:param task_definition: the task definition name on Elastic Container Service
:type task_definition: str
:param cluster: the cluster name on Elastic Container Service
:type cluster: str
:param overrides: the same parameter that boto3 will receive (templated):
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
:type overrides: dict
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
(http://boto3.readthedocs.io/en/latest/guide/configuration.html).
:type aws_conn_id: str
:param region_name: region name to use in AWS Hook.
Override the region_name in connection (if provided)
:type region_name: str
:param launch_type: the launch type on which to run your task ('EC2' or 'FARGATE')
:type launch_type: str
:param capacity_provider_strategy: the capacity provider strategy to use for the task.
When capacity_provider_strategy is specified, the launch_type parameter is omitted.
If no capacity_provider_strategy or launch_type is specified,
the default capacity provider strategy for the cluster is used.
:type capacity_provider_strategy: list
:param group: the name of the task group associated with the task
:type group: str
:param placement_constraints: an array of placement constraint objects to use for
the task
:type placement_constraints: list
:param placement_strategy: an array of placement strategy objects to use for
the task
:type placement_strategy: list
:param platform_version: the platform version on which your task is running
:type platform_version: str
:param network_configuration: the network configuration for the task
:type network_configuration: dict
:param tags: a dictionary of tags in the form of {'tagKey': 'tagValue'}.
:type tags: dict
:param awslogs_group: the CloudWatch group where your ECS container logs are stored.
Only required if you want logs to be shown in the Airflow UI after your job has
finished.
:type awslogs_group: str
:param awslogs_region: the region in which your CloudWatch logs are stored.
If None, this is the same as the `region_name` parameter. If that is also None,
this is the default AWS region based on your connection settings.
:type awslogs_region: str
:param awslogs_stream_prefix: the stream prefix that is used for the CloudWatch logs.
This is usually based on some custom name combined with the name of the container.
Only required if you want logs to be shown in the Airflow UI after your job has
finished.
:type awslogs_stream_prefix: str
:param awslogs_fetch_interval: the interval that the ECS task log fetcher should wait
in between each Cloudwatch logs fetches.
:type awslogs_fetch_interval: timedelta
:param quota_retry: Config if and how to retry the launch of a new ECS task, to handle
transient errors.
:type quota_retry: dict
:param reattach: If set to True, will check if the task previously launched by the task_instance
is already running. If so, the operator will attach to it instead of starting a new task.
This is to avoid relaunching a new task when the connection drops between Airflow and ECS while
the task is running (when the Airflow worker is restarted for example).
:type reattach: bool
:param number_logs_exception: Number of lines from the last Cloudwatch logs to return in the
AirflowException if an ECS task is stopped (to receive Airflow alerts with the logs of what
failed in the code running in ECS).
:type number_logs_exception: int
"""
ui_color = '#f0ede4'
template_fields = ('overrides',)
template_fields_renderers = {
"overrides": "json",
"network_configuration": "json",
"tags": "json",
"quota_retry": "json",
}
REATTACH_XCOM_KEY = "ecs_task_arn"
REATTACH_XCOM_TASK_ID_TEMPLATE = "{task_id}_task_arn"
def __init__(
self,
*,
task_definition: str,
cluster: str,
overrides: dict,
aws_conn_id: Optional[str] = None,
region_name: Optional[str] = None,
launch_type: str = 'EC2',
capacity_provider_strategy: Optional[list] = None,
group: Optional[str] = None,
placement_constraints: Optional[list] = None,
placement_strategy: Optional[list] = None,
platform_version: Optional[str] = None,
network_configuration: Optional[dict] = None,
tags: Optional[dict] = None,
awslogs_group: Optional[str] = None,
awslogs_region: Optional[str] = None,
awslogs_stream_prefix: Optional[str] = None,
awslogs_fetch_interval: timedelta = timedelta(seconds=30),
propagate_tags: Optional[str] = None,
quota_retry: Optional[dict] = None,
reattach: bool = False,
number_logs_exception: int = 10,
**kwargs,
):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.task_definition = task_definition
self.cluster = cluster
self.overrides = overrides
self.launch_type = launch_type
self.capacity_provider_strategy = capacity_provider_strategy
self.group = group
self.placement_constraints = placement_constraints
self.placement_strategy = placement_strategy
self.platform_version = platform_version
self.network_configuration = network_configuration
self.tags = tags
self.awslogs_group = awslogs_group
self.awslogs_stream_prefix = awslogs_stream_prefix
self.awslogs_region = awslogs_region
self.awslogs_fetch_interval = awslogs_fetch_interval
self.propagate_tags = propagate_tags
self.reattach = reattach
self.number_logs_exception = number_logs_exception
if self.awslogs_region is None:
self.awslogs_region = region_name
self.hook: Optional[AwsBaseHook] = None
self.client: Optional[ECSProtocol] = None
self.arn: Optional[str] = None
self.retry_args = quota_retry
self.task_log_fetcher: Optional[ECSTaskLogFetcher] = None
@provide_session
def execute(self, context, session=None):
    """Run the ECS task and block until it stops.

    Starts a new task (or reattaches to a previously started one when
    ``reattach`` is enabled), optionally streams its CloudWatch logs while
    waiting, then verifies the task succeeded.

    :return: the last fetched log message when ``do_xcom_push`` is set and
        a log fetcher was running, otherwise None
    """
    self.log.info(
        'Running ECS Task - Task definition: %s - on cluster %s', self.task_definition, self.cluster
    )
    self.log.info('ECSOperator overrides: %s', self.overrides)
    self.client = self.get_hook().get_conn()
    if self.reattach:
        self._try_reattach_task(context)
    # _try_reattach_task sets self.arn only when a matching running task
    # was found; otherwise we start a fresh one.
    if not self.arn:
        self._start_task(context)
    if self._aws_logs_enabled():
        self.log.info('Starting ECS Task Log Fetcher')
        self.task_log_fetcher = self._get_task_log_fetcher()
        self.task_log_fetcher.start()
        try:
            self._wait_for_task_ended()
        finally:
            # Always stop and join the fetcher thread, even if waiting failed.
            self.task_log_fetcher.stop()
            self.task_log_fetcher.join()
    else:
        self._wait_for_task_ended()
    self._check_success_task()
    self.log.info('ECS Task has been successfully executed')
    if self.reattach:
        # Clear the XCom value storing the ECS task ARN if the task has completed
        # as we can't reattach it anymore
        self._xcom_del(session, self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id))
    if self.do_xcom_push and self.task_log_fetcher:
        return self.task_log_fetcher.get_last_log_message()
    return None
def _xcom_del(self, session, task_id):
    """Remove every XCom row stored for this DAG under ``task_id``."""
    query = session.query(XCom)
    query = query.filter(XCom.dag_id == self.dag_id)
    query = query.filter(XCom.task_id == task_id)
    query.delete()
def _start_task(self, context):
    """Call ECS ``run_task`` with the configured options and record the ARN.

    Only options explicitly configured on the operator are forwarded, so
    AWS defaults apply for everything left as None.  Raises
    ECSOperatorError when the API reports any failures.
    """
    run_opts = {
        'cluster': self.cluster,
        'taskDefinition': self.task_definition,
        'overrides': self.overrides,
        'startedBy': self.owner,
    }
    # capacityProviderStrategy and launchType are mutually exclusive in the
    # ECS API; the strategy wins when both are configured.
    if self.capacity_provider_strategy:
        run_opts['capacityProviderStrategy'] = self.capacity_provider_strategy
    elif self.launch_type:
        run_opts['launchType'] = self.launch_type
    if self.platform_version is not None:
        run_opts['platformVersion'] = self.platform_version
    if self.group is not None:
        run_opts['group'] = self.group
    if self.placement_constraints is not None:
        run_opts['placementConstraints'] = self.placement_constraints
    if self.placement_strategy is not None:
        run_opts['placementStrategy'] = self.placement_strategy
    if self.network_configuration is not None:
        run_opts['networkConfiguration'] = self.network_configuration
    if self.tags is not None:
        # ECS expects a list of {'key': ..., 'value': ...} dicts.
        run_opts['tags'] = [{'key': k, 'value': v} for (k, v) in self.tags.items()]
    if self.propagate_tags is not None:
        run_opts['propagateTags'] = self.propagate_tags
    response = self.client.run_task(**run_opts)
    failures = response['failures']
    if len(failures) > 0:
        raise ECSOperatorError(failures, response)
    self.log.info('ECS Task started: %s', response)
    self.arn = response['tasks'][0]['taskArn']
    # The task id is the last path component of the ARN; it names the
    # CloudWatch log stream (see _get_task_log_fetcher).
    self.ecs_task_id = self.arn.split("/")[-1]
    self.log.info(f"ECS task ID is: {self.ecs_task_id}")
    if self.reattach:
        # Save the task ARN in XCom to be able to reattach it if needed
        self._xcom_set(
            context,
            key=self.REATTACH_XCOM_KEY,
            value=self.arn,
            task_id=self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id),
        )
def _xcom_set(self, context, key, value, task_id):
    """Persist ``value`` as an XCom entry for this DAG under ``task_id``."""
    task_instance = context["ti"]
    XCom.set(
        key=key,
        value=value,
        task_id=task_id,
        dag_id=self.dag_id,
        execution_date=task_instance.execution_date,
    )
def _try_reattach_task(self, context):
    """Set ``self.arn`` to a previously started, still-running task, if any.

    Looks up the ARN saved in XCom by a prior attempt and checks it is
    among the RUNNING tasks of the same task-definition family; leaves
    ``self.arn`` unset when no match is found.
    """
    task_def_resp = self.client.describe_task_definition(taskDefinition=self.task_definition)
    ecs_task_family = task_def_resp['taskDefinition']['family']
    list_tasks_resp = self.client.list_tasks(
        cluster=self.cluster, desiredStatus='RUNNING', family=ecs_task_family
    )
    running_tasks = list_tasks_resp['taskArns']
    # Check if the ECS task previously launched is already running
    previous_task_arn = self.xcom_pull(
        context,
        task_ids=self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id),
        key=self.REATTACH_XCOM_KEY,
    )
    if previous_task_arn in running_tasks:
        self.arn = previous_task_arn
        self.log.info("Reattaching previously launched task: %s", self.arn)
    else:
        self.log.info("No active previously launched task found to reattach")
def _wait_for_task_ended(self) -> None:
if not self.client or not self.arn:
return
waiter = self.client.get_waiter('tasks_stopped')
waiter.config.max_attempts = sys.maxsize # timeout is managed by airflow
waiter.wait(cluster=self.cluster, tasks=[self.arn])
return
def _aws_logs_enabled(self):
return self.awslogs_group and self.awslogs_stream_prefix
def _get_task_log_fetcher(self) -> ECSTaskLogFetcher:
    """Build the CloudWatch log fetcher for this task's log stream.

    The stream name is ``<awslogs_stream_prefix>/<ecs_task_id>``, following
    the awslogs driver convention.
    """
    stream_name = "/".join([self.awslogs_stream_prefix, self.ecs_task_id])
    return ECSTaskLogFetcher(
        aws_conn_id=self.aws_conn_id,
        region_name=self.awslogs_region,
        log_group=self.awslogs_group,
        log_stream_name=stream_name,
        fetch_interval=self.awslogs_fetch_interval,
        logger=self.log,
    )
def _check_success_task(self) -> None:
    """Raise AirflowException unless the stopped task finished successfully.

    Inspects the DescribeTasks response for API-level failures, tasks that
    failed to start, host-terminated tasks, and per-container exit codes /
    statuses.  When a log fetcher is active, the failure message includes
    the last ``number_logs_exception`` log lines.
    """
    if not self.client or not self.arn:
        return
    response = self.client.describe_tasks(cluster=self.cluster, tasks=[self.arn])
    self.log.info('ECS Task stopped, check status: %s', response)
    if len(response.get('failures', [])) > 0:
        raise AirflowException(response)
    for task in response['tasks']:
        if task.get('stopCode', '') == 'TaskFailedToStart':
            raise AirflowException(f"The task failed to start due to: {task.get('stoppedReason', '')}")
        # This is a `stoppedReason` that indicates a task has not
        # successfully finished, but there is no other indication of failure
        # in the response.
        # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/stopped-task-errors.html
        if re.match(r'Host EC2 \(instance .+?\) (stopped|terminated)\.', task.get('stoppedReason', '')):
            raise AirflowException(
                'The task was stopped because the host instance terminated: {}'.format(
                    task.get('stoppedReason', '')
                )
            )
        containers = task['containers']
        for container in containers:
            # A stopped container with a non-zero exit code is a failure.
            if container.get('lastStatus') == 'STOPPED' and container['exitCode'] != 0:
                if self.task_log_fetcher:
                    last_logs = "\n".join(
                        self.task_log_fetcher.get_last_log_messages(self.number_logs_exception)
                    )
                    raise AirflowException(
                        f"This task is not in success state - last {self.number_logs_exception} "
                        f"logs from Cloudwatch:\n{last_logs}"
                    )
                else:
                    raise AirflowException(f'This task is not in success state {task}')
            elif container.get('lastStatus') == 'PENDING':
                raise AirflowException(f'This task is still pending {task}')
            elif 'error' in container.get('reason', '').lower():
                raise AirflowException(
                    'This containers encounter an error during launching : {}'.format(
                        container.get('reason', '').lower()
                    )
                )
def get_hook(self) -> AwsBaseHook:
"""Create and return an AwsHook."""
if self.hook:
return self.hook
self.hook = AwsBaseHook(aws_conn_id=self.aws_conn_id, client_type='ecs', region_name=self.region_name)
return self.hook
def on_kill(self) -> None:
    """Stop log fetching and ask ECS to stop the remote task.

    No-op when no task was ever started (no client / no ARN).
    """
    if not (self.client and self.arn):
        return
    fetcher = self.task_log_fetcher
    if fetcher:
        fetcher.stop()
    response = self.client.stop_task(
        cluster=self.cluster, task=self.arn, reason='Task killed by the user'
    )
    self.log.info(response)
| {
"content_hash": "afbe5899ff44dbbe0a4b86e105ff54b0",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 147,
"avg_line_length": 40.8498985801217,
"alnum_prop": 0.6263468891206118,
"repo_name": "apache/incubator-airflow",
"id": "f560dff4e74714acce3a650d62f2c8c783e217cb",
"size": "20926",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/amazon/aws/operators/ecs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
} |
import logging
from typing import List, Optional, TYPE_CHECKING
from sqlalchemy.exc import SQLAlchemyError
from superset.charts.filters import ChartFilter
from superset.dao.base import BaseDAO
from superset.extensions import db
from superset.models.core import FavStar, FavStarClassName
from superset.models.slice import Slice
if TYPE_CHECKING:
from superset.connectors.base.models import BaseDatasource
logger = logging.getLogger(__name__)
class ChartDAO(BaseDAO):
    """Data-access object for chart (``Slice``) models."""

    model_cls = Slice
    base_filter = ChartFilter

    @staticmethod
    def bulk_delete(models: Optional[List[Slice]], commit: bool = True) -> None:
        """Delete the given charts, clearing owner/dashboard links first.

        Rolls back (when ``commit`` is True) and re-raises on any
        SQLAlchemy error.
        """
        item_ids = [model.id for model in models] if models else []
        # bulk delete, first delete related data
        if models:
            for model in models:
                model.owners = []
                model.dashboards = []
                db.session.merge(model)
        # bulk delete itself
        try:
            db.session.query(Slice).filter(Slice.id.in_(item_ids)).delete(
                synchronize_session="fetch"
            )
            if commit:
                db.session.commit()
        except SQLAlchemyError as ex:
            if commit:
                db.session.rollback()
            raise ex

    @staticmethod
    def save(slc: Slice, commit: bool = True) -> None:
        """Add a new chart to the session and optionally commit."""
        db.session.add(slc)
        if commit:
            db.session.commit()

    @staticmethod
    def overwrite(slc: Slice, commit: bool = True) -> None:
        """Merge an existing chart into the session and optionally commit."""
        db.session.merge(slc)
        if commit:
            db.session.commit()

    @staticmethod
    def favorited_ids(charts: List[Slice], current_user_id: int) -> List[int]:
        """Return the ids of the given charts the user has favorited.

        Note: the comprehension yields ``FavStar.obj_id`` integers, hence
        the ``List[int]`` return annotation.
        """
        ids = [chart.id for chart in charts]
        return [
            star.obj_id
            for star in db.session.query(FavStar.obj_id)
            .filter(
                FavStar.class_name == FavStarClassName.CHART,
                FavStar.obj_id.in_(ids),
                FavStar.user_id == current_user_id,
            )
            .all()
        ]
| {
"content_hash": "e0f53c9064db127589b17076761d0ee2",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 82,
"avg_line_length": 30.44776119402985,
"alnum_prop": 0.5936274509803922,
"repo_name": "apache/incubator-superset",
"id": "8e16f3b445b49e3855f93843f5d7a2084b86e86c",
"size": "2861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "superset/charts/dao.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "156654"
},
{
"name": "Dockerfile",
"bytes": "4402"
},
{
"name": "HTML",
"bytes": "125147"
},
{
"name": "JavaScript",
"bytes": "2357868"
},
{
"name": "Mako",
"bytes": "1197"
},
{
"name": "Python",
"bytes": "2295567"
},
{
"name": "Shell",
"bytes": "29230"
},
{
"name": "Smarty",
"bytes": "1826"
},
{
"name": "TypeScript",
"bytes": "94013"
}
],
"symlink_target": ""
} |
import os
import shutil
import numpy as np
import sys
from setup import params
from plots import plots
from compute_lines import compute_lines
import read_fortran as rf
import test_rhd as test_rhd
def set_params():
    """Reads the param file and returns the parameters.

    Echoes every value to stdout; 0/1 flags (and int_method/int_test enums)
    are validated and a RuntimeError aborts on invalid input.  Returns an
    18-tuple consumed positionally by main().
    """
    print '\nSetup.py: '
    input_file = params['input_file']
    print '- Input file: {}'.format(input_file)
    output_file = params['output_file']
    print '- Output filename: {}'.format(output_file + '.dat')
    testing_rhd = params['test_rhd']
    print '- Test RHD simulation: {}'.format(testing_rhd)
    if not (testing_rhd in [0, 1]):
        raise RuntimeError('Wrong input for testing_rhd parameter')
    binary_output_file = params['binary_output_file']
    print '- Binary output file: {}'.format(binary_output_file)
    if not (binary_output_file in [0, 1]):
        raise RuntimeError('Wrong input for binary_output_file parameter')
    tstep = params['tstep']
    print '- Time step fraction: {}'.format(tstep)
    itemax = params['itemax']
    print '- Maximum steps per line: {}'.format(itemax)
    resamp = params['resamp']
    print '- Resampling value: {}'.format(resamp)
    nlines = params['nlines']
    print '- Number of lines: {}'.format(nlines)
    if input_file != 'JET' and nlines != -1:
        print "WARNING: nlines only implemented for 'JET' simulations"
    CGS_units = params['CGS_units']
    print '- CGS units: {}'.format(CGS_units)
    if not (CGS_units in [0, 1]):
        raise RuntimeError('Wrong input for CGS_units parameter')
    fB0 = params['fB0']
    print '- B0 internal energy fraction: {}'.format(fB0)
    tr0 = params['tr0']
    print '- Tracer cut to avoid mixing: {}'.format(tr0)
    int_method = params['int_method']
    print '- Interpolation method: {}'.format(int_method)
    if not (int_method in [0, 1, 2]):
        raise RuntimeError('Wrong input for int_method parameter')
    int_test = params['int_test']
    print '- Interpolation test: {}'.format(int_test)
    if not (int_test in [0, 1, 2, 3]):
        raise RuntimeError('Wrong input for int_test parameter')
    make_plots = params['make_plots']
    print '- Make plots: {}'.format(make_plots)
    if not (make_plots in [0, 1]):
        raise RuntimeError('Wrong input for make_plots parameter')
    plots_path = params['plots_path']
    print '- Plots path: {}'.format(plots_path)
    plot_maps = params['plot_maps']
    print '- Plot maps: {}'.format(plot_maps)
    if not (plot_maps in [0, 1]):
        raise RuntimeError('Wrong input for plot_maps parameter')
    plot_lines = params['plot_lines']
    print '- Plot lines: {}'.format(plot_lines)
    if not (plot_lines in [0, 1]):
        raise RuntimeError('Wrong input for plot_lines parameter')
    plot_profiles = params['plot_profiles']
    print '- Plot profiles: {}'.format(plot_profiles)
    if not (plot_profiles in [0, 1]):
        raise RuntimeError('Wrong input for plot_profiles parameter')
    return input_file, output_file, testing_rhd, binary_output_file, tstep, itemax,\
        resamp, nlines, CGS_units, fB0, tr0, int_method, int_test, make_plots,\
        plot_maps, plot_lines, plot_profiles, plots_path
def make_folder(f):
    """Create an empty directory at ``f``, replacing any existing one.

    The original duplicated the ``os.makedirs`` call in both branches;
    collapsing to remove-then-create keeps identical behavior with one
    creation path.
    """
    if os.path.exists(f):
        shutil.rmtree(f)
    os.makedirs(f)
def prepare_data(input_file, nlines, testing_rhd):
    """Read the data file given by input_file. Being:
    (nx, ny): dimensions of the grid
    (x, y): array containing the coordinates of the center of the cell
    (xl, yl): array containing the coordinates of the left of the cell
    (xr, yr): array containing the coordinates of the right of the cell
    (dx, dy): array containing the cell sizes
    (vx, vy): array containing the velocity components
    dens: array containing the density
    eps: array containing the specific interal energy
    injec: array containing the (i, j) indicies of the injector cells
    (lx, ly): grid size
    a: distance unit
    rho0: density unit
    c: light velocity
    gammaad: adiabatic coeficient
    Note: to set a different input file modify this function but
    return the same variables"""
    c = 3e10
    (nx, ny, dens, vx, vy, eps, xl, yl, lx, ly, gammaad, rho0, a, xp,
     yp, rin, rins, tracer, time1,
     rhowp, uwp, vwp, rhowi, uwi, vwi, xs, ys) = rf.read(input_file)
    # When testing the RHD simulation, run the checks twice (flag 1 then 0 —
    # presumably verbose/quiet or CGS/code-unit modes; confirm in test_rhd)
    # and exit without computing lines.
    if testing_rhd == 1:
        print '\nTesting the RHD simulation:'
        if input_file == 'PULSAR':
            (i_sw, j_sw) = (xs*nx/lx+2, ly*ny/ly-2)
            #(i_pw, j_pw) = (xp*nx/lx+2, (yp+rin)*ny/ly+2)
            (i_pw, j_pw) = (xp*nx/lx+2, (yp-rin)*ny/ly-2)
            test_rhd.pulsar(i_pw, j_pw, i_sw, j_sw, a, rho0, xp, yp, xs, ys,
                            dens, eps, vx, vy, xl, yl, gammaad, c, lx, ly,
                            nx, ny, 1,
                            rhowp, uwp, vwp, rhowi, uwi, vwi, rin, rins)
            test_rhd.pulsar(i_pw, j_pw, i_sw, j_sw, a, rho0, xp, yp, xs, ys,
                            dens, eps, vx, vy, xl, yl, gammaad, c, lx, ly,
                            nx, ny, 0,
                            rhowp, uwp, vwp, rhowi, uwi, vwi, rin, rins)
        elif input_file == 'JET':
            (i_sw, j_sw) = (xs*nx/lx+2, (ys-rins)*ny/ly-2)
            (i_jw, j_jw) = (xp*nx/lx+2, (yp+10)*ny/ly+2)
            test_rhd.jet(i_jw, j_jw, i_sw, j_sw, a, rho0, xp, yp, xs, ys,
                         dens, eps, vx, vy, xl, yl, gammaad, c, lx, ly,
                         nx, ny, 1,
                         rhowp, uwp, vwp, rhowi, uwi, vwi, rin, rins)
            test_rhd.jet(i_jw, j_jw, i_sw, j_sw, a, rho0, xp, yp, xs, ys,
                         dens, eps, vx, vy, xl, yl, gammaad, c, lx, ly,
                         nx, ny, 0,
                         rhowp, uwp, vwp, rhowi, uwi, vwi, rin, rins)
        print '\nLines not computed\nSTOP\n'
        sys.exit(1)
    x, y, xr, yr, dx, dy, div = rf.build_grid(xl, yl, nx, ny, lx, ly, vx, vy)
    injec, sf0 = rf.injection(nx, ny, x, y, xp, yp, rin, input_file,
                              nlines, xl, xr)
    return nx, ny, x, y, xl, yl, xr, yr, dx, dy, vx, vy,\
        dens, eps, injec, sf0, lx, ly, a, rho0, c, gammaad,\
        div, tracer, time1
def time_step(dx, dy, vx, vy, tstep):
    """Return (scaled time step, smallest cell size).

    The step is the given fraction ``tstep`` of the CFL-like ratio between
    the smallest cell size and the largest speed on the grid.
    """
    speed = np.sqrt(vx ** 2. + vy ** 2.)
    ds = min(dx.min(), dy.min())
    return tstep * ds / speed.max(), ds
def print_info(nx, ny, lx, ly, dx, dy, tstep, a, c, rho0, time1):
    """Print some information on the screen.

    Purely informational: echoes grid dimensions, time step and the code
    units (a, rho0, c) both in code units and CGS.  Returns None.
    """
    print '\nSimulation info:'
    print '- Grid dimensions: {:} x {:} cells'.format(nx, ny)
    print '- Grid size: {:.0f} x {:.0f} [a]'.format(lx, ly)
    # dx[1]/dy[1] — presumably all cells share one size; confirm grid is uniform.
    print '- Cell size: {:} x {:} [a]'.format(dx[1], dy[1])
    print '- Time step size: {:.2f} [t0]'.format(tstep)
    print '- Simulated time: {:.2f} [t0]'.format(time1)
    print '- Simulated time: {:.2e} [s]'.format(time1*a/c)
    print '- Code units of time t0={:.2f} [s]'.format(a/c)
    print '- Code units of distance a={:.0e} [cm]'.format(a)
    print '- Code units of density rho0={} [g/cm^3]'.format(rho0)
    print '- Code units of velocity c={:.0e} [cm/s]'.format(c)
    print '- Code units of specific internal energy eps0=c^2'
    print '- Equation of state pres=(gammaad-1)*dens*eps'
    return
def save_binary(output_file, all_lines):
    """Dump the computed lines to ``<output_file>.npy``.

    Read it back with::

        all_lines = np.load(output_file)
        for line in all_lines:
            x, y, i, j, dens, eps, vx, vy, div, time = zip(*line)
    """
    target = '{}.npy'.format(output_file)
    np.save(target, all_lines)
def main():
    """Compute the current lines from RHD data.

    Orchestrates the full pipeline: reset output folders, read parameters,
    load and prepare the simulation data, compute the lines, then
    optionally save a .npy binary and produce plots.  Returns 0 on
    success, 1 on any exception (logged to stderr).
    """
    try:
        # Wipes and recreates both output directories on every run.
        make_folder('plots/')
        make_folder('lines/')
        (input_file, output_file, testing_rhd, binary_output_file, tstep, itemax,
         resamp, nlines, CGS_units, fB0, tr0, int_method, int_test, make_plots,
         plot_maps, plot_lines, plot_profiles,
         plots_path) = set_params()
        (nx, ny, x, y, xl, yl, xr, yr, dx, dy, vx, vy, dens, eps,
         injec, sf0, lx, ly, a, rho0, c, gammaad,
         div, tracer, time1) = prepare_data(input_file, nlines, testing_rhd)
        # Note: tstep goes in as a fraction and comes back as the actual step.
        tstep, ds = time_step(dx, dy, vx, vy, tstep)
        print_info(nx, ny, lx, ly, dx, dy, tstep, a, c, rho0, time1)
        all_lines = compute_lines(x, y, xl, yl, xr, yr,
                                  vx, vy, dens, eps, tracer,
                                  injec, sf0, tstep,
                                  nx, ny, lx, ly, dx, dy, gammaad, div,
                                  itemax, resamp, int_method, int_test,
                                  CGS_units, c, rho0, a, fB0, tr0,
                                  input_file, output_file)
        if binary_output_file == 1:
            save_binary(output_file, all_lines)
        if make_plots == 1:
            plots(x, y, dens, eps, vx, vy, div, plots_path, all_lines,
                  plot_maps, plot_lines, plot_profiles, CGS_units, lx, ly,
                  a, rho0, c, gammaad, tracer)
        return 0
    except Exception, err:
        sys.stderr.write('\nERROR: %s\n' % str(err))
        return 1
# Script entry point: clears the terminal and any previous per-line output
# files before running the pipeline (requires a POSIX shell for 'clear'/'rm').
if __name__ == '__main__':
    os.system('clear')
    print '\nRunning PyLines.py'
    os.system('rm -rf lines/current_lines_*')
    main()
| {
"content_hash": "14b5988bf2cca961d40da3c2c8949765",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 84,
"avg_line_length": 36.14230769230769,
"alnum_prop": 0.5594338618708098,
"repo_name": "xparedesfortuny/pylines",
"id": "aa1faaadc8d775989aa941aed1ed9b00045b0f05",
"size": "9490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylines.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82234"
}
],
"symlink_target": ""
} |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ComputeManagementClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for ComputeManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call. Required.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2016-03-30". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
        super(ComputeManagementClientConfiguration, self).__init__(**kwargs)
        # Both constructor arguments are mandatory; fail fast on None.
        for name, value in (("credential", credential), ("subscription_id", subscription_id)):
            if value is None:
                raise ValueError("Parameter '%s' must not be None." % name)

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = kwargs.pop("api_version", "2016-03-30")  # type: str
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-compute/{}".format(VERSION))
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        # Each pipeline policy may be supplied by the caller; otherwise the
        # default implementation is built from the remaining kwargs.
        get = kwargs.get
        self.user_agent_policy = get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = get("authentication_policy")
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
| {
"content_hash": "ae59b6fd6fdb4e8c611dcdc56961d230",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 107,
"avg_line_length": 54.101694915254235,
"alnum_prop": 0.7233709273182958,
"repo_name": "Azure/azure-sdk-for-python",
"id": "4c1fdb009898fecc62b06dd7c9c3139ca3b2ac78",
"size": "3660",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
'''
Weather station:
One script to rule them all...
HMH - 18/07/2018
'''
import sys,time,os
import Adafruit_DHT, Adafruit_MCP3008
import Adafruit_GPIO.SPI as SPI
import RPi.GPIO as GPIO
import spidev
import numpy as np
from gpiozero import DigitalInputDevice
from time import sleep
#import math
#import subprocess
import datetime,requests,json
import smtplib
from email.mime.text import MIMEText
import simple_read_windspeed as srw
#import analog_read as ar
try:
import aqi
except:
print 'USB not connected'
import platform, string
def sendemail(from_addr, to_addr_list,
              subject, message,
              login, password,
              smtpserver='smtp.gmail.com:587'):
    """Send a plain-text email through an SMTP server using STARTTLS.

    Returns the dict of refused recipients reported by sendmail
    (empty on full success).
    """
    headers = ''.join([
        'From: %s\n' % from_addr,
        'To: %s\n' % ','.join(to_addr_list),
        'Subject: %s\n\n' % subject,
    ])
    payload = headers + message
    smtp = smtplib.SMTP(smtpserver)
    smtp.starttls()
    smtp.login(login, password)
    problems = smtp.sendmail(from_addr, to_addr_list, payload)
    smtp.quit()
    return problems
def get_temp_hum(sensor, pin):
    """Take 10 DHT readings and return the median (humidity, temperature)."""
    readings = [Adafruit_DHT.read_retry(sensor, pin) for _ in range(10)]
    hums = np.array([r[0] for r in readings])
    temps = np.array([r[1] for r in readings])
    return np.median(hums), np.median(temps)
def windspeed_helper():
    """Sample the anemometer for 30 seconds and return the list of readings.

    Each sample counts rotations over 5 s via the srw (simple_read_windspeed)
    module's interrupt counter, then converts to a speed.
    """
    count = 0
    wind_speed_sensor = srw.DigitalInputDevice(5)
    wind_speed_sensor.when_activated = srw.spin
    time_interval = 0.5*60 # seconds
    time_later = time.time()
    timestamp = time.time()
    wind_array = []
    while time_later < timestamp + time_interval:
        srw.count = 0
        srw.sleep(5)
        instantaneous_windspeed = srw.get_windspeed()
        # NOTE(review): local `count` is never incremented, so this branch
        # appears dead — confirm whether it was meant to inspect srw.count.
        if count == 1:
            instantaneous_windspeed = 0.0
        wind_array.append(instantaneous_windspeed)
        time_later = time.time()
    #windspeed = srw.calculate_speed(5)
    #wind_array = simple_read_windspeed.wind_val
    #windspeed = np.mean(wind_array)
    #print "value from anemometer: ",wind_array
    return wind_array
def dust_helper():
    """Wake the dust sensor, poll it 15 times, return (pm10, pm25) lists.

    Uses the `aqi` module (USB particulate sensor driver); readings that
    come back None are skipped, so the lists may hold fewer than 15 values.
    """
    pm25 = []
    pm10 = []
    # Wake the sensor and switch it to active query mode.
    aqi.cmd_set_sleep(0)
    aqi.cmd_set_mode(1);
    for t in range(15):
        values = aqi.cmd_query_data();
        if values is not None:
            pm25.append(values[0])
            pm10.append(values[1])
        time.sleep(2)
    #print pm10
    #print pm25
    #print("Going to sleep for 5min...")
    # Put the sensor back to sleep to spare the fan/laser.
    aqi.cmd_set_mode(0);
    aqi.cmd_set_sleep()
    #time.sleep(300)
    return pm10,pm25
def read_analog(numSamples,pinVal):
    """Read `numSamples` voltages from MCP3008 ADC channel `pinVal`.

    Pulses a GPIO-driven LED around each sample (dust-sensor style timing)
    and converts the raw 10-bit reading to volts assuming a 5 V reference.
    Returns the list of voltages (may be shorter if interrupted).
    """
    #Hardware SPI configuration:
    SPI_PORT = 0
    SPI_DEVICE = 0
    mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
    # Choose GPIO pin - not actually sure if we need this, but leaving it in for meow
    ledPin = 18
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(ledPin,GPIO.OUT)
    # Timings in microseconds; deltaTime/sleepTime are currently unused.
    samplingTime = 280.0
    deltaTime = 40.0
    sleepTime = 9680.0
    return_array = []
    try:
        for i in range(0,numSamples):
            GPIO.output(ledPin,0)
            time.sleep(samplingTime*10.0**-6)
            # The read_adc function will get the value of the specified channel
            voMeasured = mcp.read_adc(pinVal)
            time.sleep(samplingTime*10.0**-6)
            GPIO.output(ledPin,1)
            time.sleep(samplingTime*10.0**-6)
            # 10-bit ADC against a 5 V reference.
            calcVoltage = voMeasured*(5.0/1024)
            return_array.append(calcVoltage)
            time.sleep(1)
    except KeyboardInterrupt:
        GPIO.cleanup()
    return return_array
if __name__=="__main__":
error_log_name = 'error_log.txt'
erf = open(error_log_name,'a')
myname = os.uname()[1]
try:
# Send email to let human know I'm alive
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'System has restarted',
message = 'Weather station '+myname+' has rebooted and the script is running!',
login = 'oddweatherstation',
password = 'winteriscoming')
except Exception as e:
print "Gmail doesn't like the machine"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
erf.close()
print "Welcome to your local weather station. Sit back, relax, and have the weather measured at you. Some of the measurements take some time, so if it looks like nothing is happening, chill for a while. If nothing continues to happen, then perhaps something strange is on your foot."
# set operations flags:
Temp_flag = 0
WS_flag = 0
WD_flag = 0
Gas_flag = 0
Dust_flag = 0
data_loc = '/home/pi/Desktop/Weather_Station/data/'
p = platform.system()
if p == 'Windows':
data_loc = string.replace(data_loc,'/','\\')
Zuma = 'notmypresident'
while Zuma == 'notmypresident': #notmypresident
timestamp = time.time() # UTC
file_time = datetime.datetime.fromtimestamp(timestamp).strftime('%Y_%m_%d_%H_%M_%S')
file_name = data_loc+'data_'+file_time+'.txt'
f = open(file_name,'a')
erf = open(error_log_name,'a')
time_interval = 24*60*60 # seconds
time_later = time.time()
while time_later < timestamp + time_interval:
# Temperature and humidity:
m_time = time.time()
print "The time is...:", m_time
print "Yeah... bet you can read that..."
print "Checking temperature and humidity"
try:
sensor2 = Adafruit_DHT.DHT22
pin2=24
humidity, temperature = get_temp_hum(sensor2,pin2)
print 'Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity)
except Exception as e:
print 'Failed to get temperature and humidity reading'
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Temp_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Temperature sensor down',
message = 'Weather station '+myname+' temperature gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
Temp_flag = 1
except:
print "Gmail doesn't like the machine"
# Gas
print "Smelling gas"
try:
gas_array = read_analog(numSamples=10,pinVal=1)
#print gas_array
gas = np.mean(gas_array)
print 'Gas = ',gas
except Exception as e:
print "We have a gas issue..."
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Gas_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Gas sensor down',
message = 'Weather station '+myname+' gas gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
Gas_flag = 1
except:
print "Gmail doesn't like the machine"
# Dust
print "Eating dust"
try:
pm10_array,pm25_array = dust_helper()
pm10 = np.median(pm10_array) # 10 microns
pm25 = np.median(pm25_array) # 2.5 microns
print 'pm 2.5 = {0:0.1f}, pm 10 = {1:0.1f}'.format(pm25,pm10)
#print 'chilling for a while'
#time.sleep(300) # this can be removed once the timing is sorted out - just here for now to stop the fan spinning up every 3 seconds
except Exception as e:
print"We are but shadows and dust, but not dust in the wind."
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Dust_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Dust sensor down',
message = 'Weather station '+myname+' dust gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
Dust_flag = 1
except:
print "Gmail doesn't like the machine"
# Run wind stuff for 300 seconds...
# Windspeed
print "Checking wind speed"
try:
windspeed_array = windspeed_helper()
windspeed = np.median(windspeed_array)
print 'Wind={0:0.1f} kph'.format(windspeed)
except Exception as e:
print 'Wind failed to pass'
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if WS_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Wind speed sensor down',
message = 'Weather station '+myname+' windspeed gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
WS_flag = 1
except:
print "Gmail doesn't like the machine"
# Wind Direction
print "Checking wind direction"
try:
wind_dir_array = read_analog(numSamples=10,pinVal=3)
winddir = np.median(wind_dir_array)
print 'Wind direction = {0:0.1f}'.format(winddir)
except Exception as e:
print "the wind is lacking direction"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if WD_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Wind direction sensor down',
message = 'Weather station '+myname+' wind direction gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
WD_flag = 1
except:
print "Gmail doesn't like the machine"
'''
print 'recording data'
line = str(temperature)+','+str(humidity)+','+str(windspeed)+','+str(winddir)+','+str(gas)+','+str(pm10)+','+str(pm25)+','+str(m_time)
f.write(line)
f.write('\n')
print 'talking to server'
# post to the village
payload = {'temp': temperature,'humid':humidity,'rain' : 0.0, 'press': 0.0}
headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
try:
r = requests.post("http://citizen-sensors.herokuapp.com/ewok-village-5000", data=json.dumps(payload),headers=headers)
except Exception as e:
print "Server not listening to me - no one ever listens to me!!!"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
time.sleep(10)
time_later = time.time()
'''
f.close()
erf.close()
| {
"content_hash": "80678064bf4b6d8fe9272c1b41684e1b",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 287,
"avg_line_length": 38.822157434402335,
"alnum_prop": 0.5102883748873536,
"repo_name": "opendatadurban/citizen_sensors",
"id": "2769d2e12f788e601f14f3cabebb72d34f57a564",
"size": "13316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Weather_Station/test_weather_station.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6282"
},
{
"name": "HTML",
"bytes": "2091"
},
{
"name": "Python",
"bytes": "62879"
},
{
"name": "Shell",
"bytes": "1306"
}
],
"symlink_target": ""
} |
import ct.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter three integer fields on the 'unit' model (ct app)."""

    dependencies = [
        ('ct', '0041_auto_20191125_0955'),
    ]

    operations = [
        # NOTE(review): help_text='2' looks like a placeholder value rather
        # than real help text -- confirm before it surfaces in admin forms.
        migrations.AlterField(
            model_name='unit',
            name='courselet_days',
            field=models.IntegerField(blank=True, help_text='2', null=True),
        ),
        migrations.AlterField(
            model_name='unit',
            name='error_resolution_days',
            field=models.IntegerField(blank=True, null=True),
        ),
        # Grade is a percentage; range is enforced by ct.models.percent_validator.
        migrations.AlterField(
            model_name='unit',
            name='follow_up_assessment_grade',
            field=models.IntegerField(blank=True, default=15, null=True, validators=[ct.models.percent_validator]),
        ),
    ]
| {
"content_hash": "7d42e2abae6f8e74029e1aea0202c5dd",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 115,
"avg_line_length": 29.40740740740741,
"alnum_prop": 0.5831234256926953,
"repo_name": "cjlee112/socraticqs2",
"id": "7fad8f68634b8011146cf5b9af09837380091bf6",
"size": "844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/ct/migrations/0042_auto_20191125_1339.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "138226"
},
{
"name": "Dockerfile",
"bytes": "3865"
},
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "467395"
},
{
"name": "JavaScript",
"bytes": "234788"
},
{
"name": "Makefile",
"bytes": "4696"
},
{
"name": "Python",
"bytes": "1785754"
},
{
"name": "Shell",
"bytes": "2889"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Static scene object for the large dark beach rock template.

    Auto-generated SWG template factory; the kernel argument is part of the
    loader's calling convention and is not used here.
    """
    result = Static()
    result.template = "object/static/structure/general/shared_rock_beach_dark_lg.iff"
    result.attribute_template_id = -1  # -1: no attribute template
    result.stfName("obj_n","unknown_object")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
return result | {
"content_hash": "56a3ea5579f0ea3cd615362712bf719a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 23.923076923076923,
"alnum_prop": 0.6945337620578779,
"repo_name": "obi-two/Rebelion",
"id": "fb70d5ba82f4a2b6822b777d437c6961495c6a1b",
"size": "456",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/structure/general/shared_rock_beach_dark_lg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import os
import sys
import subprocess
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/fit_tests/common")
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd11_api_templates(fit_common.unittest.TestCase):
def test_api_11_templates_library(self):
api_data = fit_common.rackhdapi("/api/1.1/templates/library")
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
for item in api_data['json']:
# check required fields
for subitem in ['contents', 'createdAt', 'id', 'name', 'updatedAt']:
if fit_common.VERBOSITY >= 2:
print "Checking:", item['name'], subitem
self.assertGreater(len(item[subitem]), 0, subitem + ' field error')
def test_api_11_templates_library_ID(self):
api_data = fit_common.rackhdapi("/api/1.1/templates/library")
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
for item in api_data['json']:
lib_data = fit_common.rackhdapi("/api/1.1/templates/library/" + item['name'])
self.assertEqual(lib_data['status'], 200, "Was expecting code 200. Got " + str(lib_data['status']))
# check required fields
for subitem in ['contents', 'createdAt', 'id', 'name', 'updatedAt']:
if fit_common.VERBOSITY >= 2:
print "Checking:", item['name'], subitem
self.assertGreater(len(item[subitem]), 0, subitem + ' field error')
# Allow running this test module directly, outside the FIT test runner.
if __name__ == '__main__':
    fit_common.unittest.main()
| {
"content_hash": "214c5a5a9dcf47eefdd55dab4d3eee64",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 125,
"avg_line_length": 43.523809523809526,
"alnum_prop": 0.6192560175054704,
"repo_name": "BillyAbildgaard/RackHD",
"id": "c0cfa20a78bfe0635245a5940dc971e19259a1b8",
"size": "1828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/fit_tests/tests/rackhd11/test_rackhd11_api_templates.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "696"
},
{
"name": "Python",
"bytes": "727696"
},
{
"name": "Ruby",
"bytes": "10473"
},
{
"name": "Shell",
"bytes": "62533"
}
],
"symlink_target": ""
} |
import web
import hashlib
import time
import json
# import urllib
from markdown import markdown
from sign import sign
from common import *
from config import render,upload_path,app_root,webConfig
from conn import client
from bson.objectid import ObjectId
# MongoDB database handle (client is configured in conn.py).
db = client.pyblog

# URL routing for the admin sub-application.
urls = (
    '/0', 'dashboard',  # root!!!!!
    '/login', 'login',
    '/logout', 'logout',
    '/reg', 'reg',
    '/msg', 'msg',
    '/regAccount', 'regAccount'
)
admin = web.application(urls,locals())
# Registration: e-mail based account sign-up (sends a one-time token link).
class regAccount:
    def GET(self):
        """Render the sign-up page."""
        # if web.ctx.session.hasLogin:
        #     raise web.redirect('/')
        # else:
        return render.regAccount()
    def POST(self):
        """E-mail a one-time registration token to the submitted address.

        Responds with JSON: code 500 if the account already exists,
        code 200 once the mail has been sent.
        """
        data = web.input()
        token = randomString()
        email = data['username']
        web.header('Content-Type','application/json')
        if db['users'].find_one({'username': email}):
            # Account already exists.
            return json.dumps({
                'code': 500,
                'msg': '账户已存在'
            })
        else:
            # Upsert: a repeated request replaces any earlier token.
            db['regToken'].update({
                'email': email
            },{
                'email': email,
                'token': token
            },True)
            webName = webConfig['hostname'].encode('utf8')
            web.sendmail('otarim@icloud.com', email, '注册 pyblog', '点击链接跳转到注册页面<a href="'+webName+'/u/reg?token='+token+'">'+webName+'/u/reg?token='+token+'</a>' ,headers=({'Content-Type': 'text/html; charset=UTF-8'}))
            return json.dumps({
                'code': 200,
                'msg': '邮件已发出,请打开邮箱检查收件箱,如果收件箱找不到邮件,可能在垃圾邮件里面可以找到'
            })
class reg:
    def GET(self):
        """Render the registration form for a valid e-mailed token."""
        data = web.input()
        if 'token' in data:
            result = db['regToken'].find_one({'token': data['token']})
            if result:
                username = result['email']
                return render.reg({
                    'username': username
                })
        else:
            return web.internalerror('非法操作')
    def POST(self):
        """Create the user account, open a session and redirect to the dashboard."""
        data = web.input()
        if data.password and data.repassword and data.password == data.repassword:
            # Locking caveat: findAndModify locks the collection, so a
            # separate 'ids' collection maintains the auto-increment user id.
            uid = db['ids'].find_and_modify(query={'name':'user'},update={'$inc':{'id':1}},new=True)['id']
            db['users'].insert({
                'uid': int(uid),
                'username': data.username,
                'nickname': data.nickname,
                'avatar': getAvatar(data.username),
                'password': hashlib.md5(data.password).hexdigest(),  # NOTE(review): unsalted MD5
                'regDate': time.time(),
                'regIp': web.ctx.ip,
                'loginIp': web.ctx.ip,
                'lastLoginTime': time.time()
            })
            # session.hasLogin = True
            # session.username = data.username
            writeSession({
                'hasLogin': True,
                'username': data.username
            })
            web.setcookie('pyname',data.username,72000,path='/')
            # web.setcookie('pyconnect',sign(data.username),72000,path='/')
            # Delete the consumed token document from the regToken collection.
            db['regToken'].remove({'email': data['username']})
            return web.redirect('/0')
# Login
class login:
    def GET(self):
        """Show the login form, or go straight to the dashboard when already logged in."""
        if checkLogin():
            return web.redirect('/0')
        else:
            return render.login()
    def POST(self):
        """Validate credentials, refresh login metadata and open a session."""
        data = web.input()
        if data.username:
            user = db['users'].find_one({'username': data.username})
            if user:
                if hashlib.md5(data.password).hexdigest() == user.get('password'):
                    # success
                    # Update last-login time and IP.
                    db['users'].update({'username': data.username},{
                        '$set': {
                            'loginIp': web.ctx.ip,
                            'lastLoginTime': time.time()
                        }
                    })
                    writeSession({
                        'hasLogin': True,
                        'username': data.username
                    })
                    web.setcookie('pyname',data.username,36000,path='/')
                    # web.setcookie('pyconnect',sign(data.username),36000,path='/')
                    return web.redirect('/0')
                else:
                    return '密码错误'
            else:
                return '用户不存在'
class logout:
    def GET(self):
        """Expire the login cookie, clear the session, redirect to /login."""
        if web.ctx.has_key('session'):
            user = web.ctx.session.get('username')
            if(user):
                # Negative max-age expires the cookie immediately.
                web.setcookie('pyname',user,-1,path='/')
                writeSession({
                    'hasLogin': False,
                    'username': None
                })
                # web.setcookie('pyconnect',sign(user),-1,path='/')
        raise web.redirect('/login')
# Dashboard (home page)
class dashboard:
    # A redirect loop seen here was traced to cookie-domain issues.
    def GET(self):
        """Render the logged-in user's dashboard.

        Shows the user's own posts, who they follow, who follows them,
        and the posts they have liked.
        """
        if checkLogin():
            artist = db['users'].find_one({'username': web.ctx.session.get('username')})
            posts = list(db['posts'].find({'artist':artist['_id']}).sort('postDate',-1))
            # Mark posts the user owns or is assigned to.
            for i in posts:
                if i.get('assigns'):
                    if artist['_id'] == i['artist'] or str(artist['_id']) in i.get('assigns'):
                        i['assign'] = True
            # Users I follow.
            following = db['follow'].find_one({'master': artist['_id']},{'follower': 1,'_id': 0})
            if following:
                following = list(db['users'].find({'_id': {'$in': following['follower']}}))
            else:
                following = None
            # Users following me.
            followers = db['follow'].find({'follower': {'$in': [artist['_id']]}},{'master': 1,'_id': 0})
            if followers.count():
                followers = getArtistByKey(followers,'master')
            else:
                followers = None
            # Posts I liked (action == 1), newest first.
            pids = list(db['actions'].find({
                'userId': str(artist['_id']),
                'action': 1
            }, {
                '_id': -1,
                'postId': 1
            }).sort('actionTime', -1))
            # Collect the liked post ids and fetch the posts themselves.
            ids = []
            for pid in pids:
                ids.append(ObjectId(pid['postId']))
            favPosts = list(db['posts'].find({
                '_id': {
                    '$in': ids
                }
            }))
            handlerSpecPostType(favPosts, artist['_id'])
            return render.admin({
                'user': artist,
                'posts': posts,
                'following': following,
                'followers': followers,
                'favPosts': favPosts
            })
        else:
            return web.redirect('/login')
def getAvatar(email):
return 'https://cdn.v2ex.com/gravatar/'+hashlib.md5(email).hexdigest() +'?d=retro';
# Use ObjectId(post_id) to query documents by id
# from bson.objectid import ObjectId
# http://blog.csdn.net/iefreer/article/details/9024993 DELETE 方法
| {
"content_hash": "89203a10692307be58423f26e8cd9c8b",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 208,
"avg_line_length": 25.927884615384617,
"alnum_prop": 0.609122937140738,
"repo_name": "otarim/pyblog",
"id": "24ffe76130c83c308b39d132c6b5fa6fcc808c35",
"size": "5743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4442"
},
{
"name": "HTML",
"bytes": "73287"
},
{
"name": "JavaScript",
"bytes": "170135"
},
{
"name": "Python",
"bytes": "30856"
}
],
"symlink_target": ""
} |
from math import sin, pi

# Persisted 256-entry "bradian" sine table (256 bradians == one full turn).
sinfile = "sintable.dat"
sintable = []
try:
    # Load a previously generated table from disk.
    tablefile = file(sinfile, 'r')
    for i in tablefile:
        sintable.append(float(i))
    if len(sintable) != 256:
        # NOTE(review): AttributeError is NOT caught by the IOError handler
        # below, so a malformed table aborts instead of being regenerated;
        # confirm that is intended.
        raise AttributeError, \
            "current file is not a bradian sin table!"
    tablefile.close()
except IOError:
    # if .sintable.dat doesn't exist, or is
    # corrupted, then create a new table...
    tablefile = file(sinfile, 'w')
    for i in range(256):
        sintable.append(sin(i*pi/128.0))
        print >>tablefile, sintable[i]
    tablefile.close()
# so now we go to the trig functions:
def tsin(beta):
    """Sine of *beta* bradians, looked up in the precomputed table."""
    index = int(round(beta)) % 256
    return sintable[index]
def tcos(beta):
    """Cosine of *beta* bradians (cos(x) == sin(64 - x) in bradians)."""
    index = int(64 - round(beta)) % 256
    return sintable[index]
def ttan(beta):
    """Tangent of *beta* bradians.

    A dedicated lookup table would be slightly faster than dividing
    tsin by tcos, but this keeps the code simple.
    """
    return tsin(beta) / tcos(beta)
def tcot(beta):
    """Cotangent of *beta* bradians (same lookup caveat as ttan)."""
    return tcos(beta) / tsin(beta)
# Note that I won't worry about defining secant or
# cosecant. I don't think they're needed, at least
# not for now!
| {
"content_hash": "02e3899bd9f93f79da18f1443128c19b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 51,
"avg_line_length": 25.454545454545453,
"alnum_prop": 0.6598214285714286,
"repo_name": "snowfarthing/nibbles_3d",
"id": "de2db6008dbd0e3458f8e6897bb9e8cced9e0a75",
"size": "1566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ttable.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107891"
}
],
"symlink_target": ""
} |
"""
Organizer user registration
"""
from hackfsu_com.views.generic import PageView
from hackfsu_com.util import acl
class OrganizerRegistrationPage(PageView):
    """Organizer registration page.

    Accessible only to plain logged-in users: anyone who is already a
    hacker or organizer, or pending either role, is denied.
    """
    template_name = 'registration/organizer/index.html'
    # Accept plain users; deny everyone already in (or pending) a role group.
    access_manager = acl.AccessManager(acl_accept=[acl.group_user],
                                       acl_deny=[acl.group_hacker, acl.group_organizer,
                                                 acl.group_pending_hacker, acl.group_pending_organizer])
| {
"content_hash": "6282280675db2c2eebbf3c31eaae7bb5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 104,
"avg_line_length": 37.15384615384615,
"alnum_prop": 0.6314699792960663,
"repo_name": "andrewsosa/hackfsu_com",
"id": "19e109418fbd90f5b9295ee03a2b3c0025727bf1",
"size": "483",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/webapp/views/registration/organizer/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "81944"
},
{
"name": "HTML",
"bytes": "88639"
},
{
"name": "JavaScript",
"bytes": "127887"
},
{
"name": "Python",
"bytes": "279510"
},
{
"name": "Shell",
"bytes": "897"
}
],
"symlink_target": ""
} |
from tempest.api.identity import base
from tempest import clients
from tempest.common import credentials
from tempest.common import custom_matchers
from tempest import config
from tempest import exceptions
import tempest.test
CONF = config.CONF
class BaseObjectTest(tempest.test.BaseTestCase):
    """Base class for Swift (object storage) API tests.

    resource_setup builds three isolated credential sets (primary, admin,
    alternate) and exposes object/container/account clients for each.
    """

    @classmethod
    def resource_setup(cls):
        cls.set_network_resources()
        super(BaseObjectTest, cls).resource_setup()
        # Skip the whole class when Swift is not deployed.
        if not CONF.service_available.swift:
            skip_msg = ("%s skipped as swift is not available" % cls.__name__)
            raise cls.skipException(skip_msg)
        cls.isolated_creds = credentials.get_isolated_credentials(
            cls.__name__, network_resources=cls.network_resources)
        # Get isolated creds for normal user
        cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
        # Get isolated creds for admin user
        cls.os_admin = clients.Manager(cls.isolated_creds.get_admin_creds())
        # Get isolated creds for alt user
        cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
        # Convenience aliases to the per-user service clients.
        cls.object_client = cls.os.object_client
        cls.container_client = cls.os.container_client
        cls.account_client = cls.os.account_client
        cls.token_client = cls.os_admin.token_client
        cls.identity_admin_client = cls.os_admin.identity_client
        cls.object_client_alt = cls.os_alt.object_client
        cls.container_client_alt = cls.os_alt.container_client
        cls.identity_client_alt = cls.os_alt.identity_client
        # Make sure we get fresh auth data after assigning swift role
        cls.object_client.auth_provider.clear_auth()
        cls.container_client.auth_provider.clear_auth()
        cls.account_client.auth_provider.clear_auth()
        cls.object_client_alt.auth_provider.clear_auth()
        cls.container_client_alt.auth_provider.clear_auth()
        # Data generator also grants the Swift operator role (see below).
        cls.data = SwiftDataGenerator(cls.identity_admin_client)

    @classmethod
    def resource_cleanup(cls):
        # Tear down generated users/tenants, then the isolated credentials.
        cls.data.teardown_all()
        cls.isolated_creds.clear_isolated_creds()
        super(BaseObjectTest, cls).resource_cleanup()

    @classmethod
    def delete_containers(cls, containers, container_client=None,
                          object_client=None):
        """Remove given containers and all objects in them.

        The containers should be visible from the container_client given.
        Will not throw any error if the containers don't exist.
        Will not check that object and container deletions succeed.

        :param containers: list of container names to remove
        :param container_client: if None, use cls.container_client, this means
            that the default testing user will be used (see 'username' in
            'etc/tempest.conf')
        :param object_client: if None, use cls.object_client
        """
        if container_client is None:
            container_client = cls.container_client
        if object_client is None:
            object_client = cls.object_client
        for cont in containers:
            try:
                objlist = container_client.list_all_container_objects(cont)
                # delete every object in the container
                for obj in objlist:
                    try:
                        object_client.delete_object(cont, obj['name'])
                    except exceptions.NotFound:
                        pass
                container_client.delete_container(cont)
            except exceptions.NotFound:
                pass

    def assertHeaders(self, resp, target, method):
        """
        Common method to check the existence and the format of common response
        headers
        """
        self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
            target, method))
        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
class SwiftDataGenerator(base.DataGenerator):
    """Identity data generator that also grants a Swift role to the user."""

    def setup_test_user(self, reseller=False):
        """Create a test user and assign the operator (or reseller-admin) role."""
        super(SwiftDataGenerator, self).setup_test_user()
        if reseller:
            role_name = CONF.object_storage.reseller_admin_role
        else:
            role_name = CONF.object_storage.operator_role
        role_id = self._get_role_id(role_name)
        self._assign_role(role_id)

    def _get_role_id(self, role_name):
        """Return the id of *role_name*; raise NotFound when no role matches."""
        try:
            _, roles = self.client.list_roles()
            # next() raises StopIteration when no role name matches.
            return next(r['id'] for r in roles if r['name'] == role_name)
        except StopIteration:
            msg = "Role name '%s' is not found" % role_name
            raise exceptions.NotFound(msg)

    def _assign_role(self, role_id):
        """Grant *role_id* to the generated user within the generated tenant."""
        self.client.assign_user_role(self.tenant['id'],
                                     self.user['id'],
                                     role_id)
| {
"content_hash": "882971f1b7f8c930aa1eef661d651d02",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 40.57627118644068,
"alnum_prop": 0.6294903926482874,
"repo_name": "afaheem88/tempest_neutron",
"id": "fcb80f5ecc30621ee9941571e795f44524c46fc2",
"size": "5425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/object_storage/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2778383"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
from typing import Optional, List
import logging
import os
import sqlite3
import tempfile
from time import time
class Storage(object):
    """Locate or create the SQLite database used to persist bandwidth data.

    The database file lives in the system temp directory (one file per
    user). If it cannot be created there, the code falls back to an
    in-memory database; if that fails too, ``path`` stays ``None`` and
    persistence is disabled.
    """

    # Resolved database path; None means persistence is unavailable.
    path = None

    def __init__(self, path: Optional[str] = None, user: Optional[str] = None):
        log = logging.getLogger('theonionbox')
        self.path = None
        if path is None:
            path = tempfile.gettempdir()
            log.debug("Temp directory identified as {}.".format(path))
        path = os.path.abspath(path)
        # One persistence file per user; '.persist' for the anonymous case.
        if user is None or user == '':
            path = os.path.join(path, '.theonionbox.persist')
        else:
            path = os.path.join(path, '.theonionbox.{}'.format(user))
        attempts = 0
        while attempts < 2:
            try:
                with sqlite3.connect(path) as conn:
                    sql = "CREATE TABLE IF NOT EXISTS nodes (fp string PRIMARY KEY NOT NULL UNIQUE );"
                    # The UNIQUE constraint ensures that there's always only one record per interval
                    sql += """CREATE TABLE IF NOT EXISTS bandwidth (fp int,
                                                                    interval text(2),
                                                                    timestamp int,
                                                                    read int,
                                                                    write int,
                                                                    UNIQUE (fp, interval, timestamp)
                                                                    ON CONFLICT REPLACE
                                                                    );"""
                    conn.executescript(sql)
                    # log.notice is a custom level added by theonionbox's
                    # logging setup -- TODO(review): confirm it is installed
                    # before Storage is constructed.
                    log.notice("Persistance data will be written to '{}'.".format(path))
                    self.path = path
                    return
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt still propagate; any other failure
                # triggers the in-memory fallback.
                log.notice("Failed to create persistance database @ '{}'.".format(path))
                path = ':memory:'
                attempts += 1
        # At this point there's no persistance db created.
        # That's domague - yet inevitable.
        self.path = None

    def get_path(self) -> Optional[str]:
        """Return the database path, or None when persistence is disabled."""
        # Annotation fixed: the original claimed 'str' but None is possible.
        return self.path
class BandwidthPersistor(object):
    """Persist and retrieve per-relay bandwidth samples.

    A persistor is bound to a single relay fingerprint; construction
    registers the fingerprint in the ``nodes`` table and caches its ROWID
    (``fpid``). ``fpid is None`` marks the persistor as disabled and every
    operation becomes a no-op.
    """

    def __init__(self, storage: Storage, fingerprint: str):
        self.path = None
        self.fp = None
        self.fpid = None  # ROWID of the fingerprint in 'nodes'; None => disabled
        log = logging.getLogger('theonionbox')
        if len(fingerprint) == 0:
            log.debug('Skipped registration for persistance of node with fingerprint of length = 0.')
            return
        path = storage.get_path()
        if path is None:
            return
        conn = self.open_connection(path)
        if conn is None:
            return
        # Bug fix: early returns below used to leak this connection;
        # the finally-block now guarantees it is closed on every path.
        try:
            # register this fingerprint ('with conn' commits the transaction)
            try:
                with conn:
                    conn.execute("INSERT OR IGNORE INTO nodes(fp) VALUES(?);", (fingerprint,))
            except Exception as exc:
                log.warning('Failed to register {}... for persistance. {}'.format(fingerprint[:6], exc))
                return
            fpid = None
            r = None
            try:
                cur = conn.cursor()
                cur.execute("SELECT ROWID as id FROM nodes WHERE fp=?", (fingerprint,))
                r = cur.fetchone()
            except Exception:
                # Best effort: leave the persistor disabled on lookup failure.
                return
            # This indicates that fingerprint was successfully registered
            try:
                fpid = r['id']
            except Exception:
                # r is None (lookup returned nothing) -> stay disabled.
                return
            if fpid is not None:
                self.path = path
                self.fp = fingerprint
                self.fpid = fpid
        finally:
            conn.close()

    def open_connection(self, path: Optional[str] = None) -> Optional[sqlite3.Connection]:
        """Open a row-factory connection to *path* (default: registered path).

        Returns None when no path is known or the connection fails.
        """
        if path is None:
            path = self.path
        if path is not None:
            try:
                conn = sqlite3.connect(path)
                conn.row_factory = sqlite3.Row
                return conn
            except Exception:
                log = logging.getLogger('theonionbox')
                log.warning('Failed to open connection to storage @ {}.'.format(path))
        return None

    # This does not commit! Callers providing their own connection are
    # responsible for committing. NOTE(review): an internally created
    # connection is neither committed nor closed here, so such writes may
    # be lost -- confirm callers always pass a connection.
    def persist(self, interval: str, timestamp: float,
                read: int = 0, write: int = 0,
                connection: Optional[sqlite3.Connection] = None) -> bool:
        """Insert one bandwidth sample; return True on success."""
        if self.fpid is None:
            return False
        if connection is None:
            connection = self.open_connection()
            if connection is None:
                return False
        try:
            connection.execute("INSERT INTO bandwidth(fp, interval, timestamp, read, write) VALUES(?, ?, ?, ?, ?)",
                               (self.fpid, interval, timestamp, read, write))
        except Exception as e:
            log = logging.getLogger('theonionbox')
            log.warning(f'Failed to open persist bandwidth data for fingerprint {self.fp[:6]}: {e}')
            return False
        return True

    # get the data back from the table
    def get(self, interval: str, js_timestamp: Optional[int] = None, limit: int = -1,
            offset: int = 0, connection: Optional[sqlite3.Connection] = None) -> Optional[List[sqlite3.Row]]:
        """Return persisted samples (newest first), or None on failure.

        Each row carries the caller's *js_timestamp* ('s'), the sample's
        JavaScript-style timestamp ('m') and the read/write values ('r'/'w').
        """
        if js_timestamp is None:
            # Bug fix: the old default 'int(time() * 1000)' in the signature
            # was evaluated once at import time, freezing the timestamp for
            # the whole process lifetime. Compute it per call instead.
            js_timestamp = int(time() * 1000)
        if connection is None:
            connection = self.open_connection()
            if connection is None:
                return None
        # some SELECT magic to eliminate the need for later manipulation
        cur = connection.cursor()
        sql = """
            SELECT
                :jsts as 's',
                timestamp * 1000 as 'm',
                read as 'r',
                write as 'w'
            FROM bandwidth
            WHERE fp = :fp AND interval = :interval
            ORDER BY timestamp DESC
            LIMIT :limit OFFSET :offset
        """
        try:
            cur.execute(sql, {'jsts': js_timestamp,
                              'fp': self.fpid,
                              'interval': interval,
                              'limit': limit,
                              'offset': offset}
                        )
        except Exception as e:
            log = logging.getLogger('theonionbox')
            log.warning('Failed to get persisted data: {}'.format(e))
            return None
        res = cur.fetchall()
        return res
| {
"content_hash": "8abbe4da2c6ecdef9d24ef1dffabc34e",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 124,
"avg_line_length": 33.48148148148148,
"alnum_prop": 0.4943109987357775,
"repo_name": "ralphwetzel/theonionbox",
"id": "69abb2dd5cf725b27f4fc8beb10d74bae338f20b",
"size": "6328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "theonionbox/tob/persistor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25508"
},
{
"name": "CSS",
"bytes": "151046"
},
{
"name": "Dockerfile",
"bytes": "279"
},
{
"name": "HTML",
"bytes": "330425"
},
{
"name": "JavaScript",
"bytes": "935134"
},
{
"name": "Makefile",
"bytes": "385"
},
{
"name": "Python",
"bytes": "327442"
},
{
"name": "Shell",
"bytes": "4568"
}
],
"symlink_target": ""
} |
from django.db import models
# Create your models here.
# A notice-board entry published by one of the member organizations.
class Notice(models.Model):
    ORGANIZATION_CHOICES = (
        ('KIN', 'KIN'),
        ('SSA', 'SUST Science Arena'),
        ('DIK', 'Dik Theater'),
    )
    title = models.CharField(max_length=150)
    body = models.TextField(blank=True)  # optional long text
    pubdate = models.DateTimeField(auto_now_add=True)  # set once, on creation
    modified = models.DateTimeField(auto_now=True)  # refreshed on every save
    organization = models.CharField(max_length=3, choices=ORGANIZATION_CHOICES)
    class Meta:
        verbose_name_plural = "Notice Board"
        ordering = ["-pubdate"]  # newest first
    def __unicode__(self):
        # Python 2 string representation: first 50 characters of the title.
        return self.title[:50]
| {
"content_hash": "62f8f8155582fdcd9cef684ddd4553dd",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 27.82608695652174,
"alnum_prop": 0.6359375,
"repo_name": "salmanwahed/haystack-test-project",
"id": "c997d91ffb3f1bca34468bb0e6311331cac2410c",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haystack_test/noticeboard/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "48167"
},
{
"name": "HTML",
"bytes": "2254"
},
{
"name": "JavaScript",
"bytes": "78463"
},
{
"name": "Python",
"bytes": "7368"
}
],
"symlink_target": ""
} |
import os, re, sys, copy
from optparse import OptionParser
from immunoseq.lib.immunoseqLib import *
import matplotlib.cm as cm
import matplotlib.pyplot as pyplot
from matplotlib.ticker import *
from matplotlib.font_manager import FontProperties
import matplotlib.backends.backend_pdf as pltBack
def getData(seqs1, seqs2):
    """Build scatter data for two samples of clones.

    For every clone (aa, v, j) present in either sample, form the point
    (count in seqs1, count in seqs2) and tally how many clones share that
    exact point. Returns (xdata, ydata, counts) as parallel lists.
    """
    pair2count = {}  # (count1, count2) -> number of clones at that point
    # Clones present in seqs1 (second coordinate is 0 when absent in seqs2).
    for aa, v2j in seqs1.iteritems():
        for v, j2seq in v2j.iteritems():
            for j, seq in j2seq.iteritems():
                c2 = 0
                if aa in seqs2 and v in seqs2[aa] and j in seqs2[aa][v]:
                    c2 = seqs2[aa][v][j].count
                key = (seq.count, c2)
                pair2count[key] = pair2count.get(key, 0) + 1
    # Clones only present in seqs2 (first coordinate is 0).
    for aa, v2j in seqs2.iteritems():
        for v, j2seq in v2j.iteritems():
            for j, seq in j2seq.iteritems():
                if aa in seqs1 and v in seqs1[aa] and j in seqs1[aa][v]:
                    continue
                key = (0, seq.count)
                pair2count[key] = pair2count.get(key, 0) + 1
    xdata = []
    ydata = []
    counts = []
    for point, count in pair2count.iteritems():
        xdata.append(point[0])
        ydata.append(point[1])
        counts.append(count)
    return xdata, ydata, counts
def scatterPlot(pair, samples, options):
    """Draw a log-log scatter plot comparing clone sizes in two samples.

    Writes '<s1>-<s2>-scatter[-rel].<ext>' via writeImage; mutates
    options.out as a side effect. In relative mode (options.rel) counts
    are converted to percentages of each sample's total reads.
    """
    isAbs = not options.rel
    sample1 = pair[0]
    sample2 = pair[1]
    seqs1 = samples[sample1].seqs
    seqs2 = samples[sample2].seqs
    #Plot:
    options.out = "%s-%s-scatter" %(sample1, sample2)
    if not isAbs:
        options.out += "-rel"
    fig, pdf = initImage(10.0, 10.0, options)
    data1, data2, counts = getData(seqs1, seqs2)
    # Relative mode: convert raw counts to percentages of total reads.
    if not isAbs:
        total1 = samples[sample1].total
        if total1 > 0:
            data1 = [ d*100.0/total1 for d in data1]
        total2 = samples[sample2].total
        if total2 > 0:
            data2 = [ d*100.0/total2 for d in data2]
    #else:
    # Zeros cannot be drawn on a log scale; replace them with a value one
    # decade below the smallest observed non-zero value.
    nonzeros = []
    for i, d1 in enumerate(data1):
        if d1 > 0:
            nonzeros.append(d1)
        if data2[i] > 0:
            nonzeros.append(data2[i])
    minNonzeros = min(nonzeros)
    zeroReplace = float(minNonzeros)/10.0
    if not isAbs:
        zeroReplace = 1.0
        while minNonzeros < 1:
            zeroReplace /= 10.0
            minNonzeros *= 10
    for i in xrange( len(data1) ):
        if data1[i] == 0:
            data1[i] = zeroReplace
        if data2[i] == 0:
            data2[i] = zeroReplace
    axes = fig.add_axes( [0.08, 0.1, 0.87, 0.8] )
    xmax = max(data1)
    xmin = min(data1)
    ymax = max(data2)
    ymin = min(data2)
    xymax = max([xmax, ymax])
    xymin = min([xmin, ymin])
    # NOTE(review): 'range' shadows the builtin within this function.
    range = xymax - xymin
    xymax += range*0.01
    #if not isAbs:
    #    xymin -= range*0.01
    #axes.plot( data1, data2, marker='o', markeredgecolor='b', markersize=5.0, linestyle='none' )
    maxcount = float(max(counts))
    #cm = matplotlib.cm.jet
    #counts = [ cm.jet( c/maxcount ) for c in counts]
    counts = [ cm.jet( c ) for c in counts]
    #colors = [colorConvert.to_rgb(float(c)/maxcount) for c in counts]
    cax = axes.scatter( data1, data2, color=counts, marker='o' )
    #fig.colorbar(cax, ticks=[min(counts), max(counts)], orientation='vertical')
    # Diagonal y == x reference line.
    axes.plot( [xymin, xymax], [xymin, xymax], linestyle='-', linewidth=0.4, color="#848484" )
    # Manually draw grid lines because freaking keynote would not display
    # the grid drawn via axes.x/yaxis.grid.
    for x in [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]:
        axes.plot( [x, x], [xymin, 100], linestyle='-', linewidth=0.4, color='#848484')
    for y in [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]:
        axes.plot( [xymin, 100], [y,y], linestyle='-', linewidth=0.4, color='#848484')
    #HACK
    #xymax = 1
    #END HACK
    axes.set_xlim(xymin, xymax)
    axes.set_ylim(xymin, xymax)
    #if isAbs:
    axes.set_xscale('log')
    axes.set_yscale('log')
    axes.set_title( 'Clone Size in %s versus %s' %(sample1, sample2) )
    axes.set_xlabel('%s read count' %sample1)
    axes.set_ylabel('%s read count' %sample2)
    if not isAbs:
        axes.set_xlabel('%s read frequency' %sample1)
        axes.set_ylabel('%s read frequency' %sample2)
    #axes.yaxis.grid(b=True, color="#848484", linestyle='-', linewidth=0.005)
    #axes.xaxis.grid(b=True, color="#848484", linestyle='-', linewidth=0.005)
    #fig.savefig( pdf, format='pdf' )
    #pdf.close()
    writeImage( fig, pdf, options)

#Tab:
#tabfile = "%s-%s-tab.tex"
def readPairsFile(file):
    """Read sample pairs from *file*: one pair per line, two names
    separated by whitespace.

    Returns a list of [name1, name2] lists. Exits the program with an
    error message when a line does not contain exactly two fields.
    """
    pairs = []
    # Bug fix: the handle was opened with open() and never closed (neither
    # on success nor on the sys.exit error path); the context manager
    # guarantees it is released.
    with open(file, 'r') as f:
        for line in f:
            items = line.strip().split()
            if len(items) != 2:
                sys.stderr.write("Wrong pair file format\n")
                sys.exit(1)
            pairs.append( items )
    return pairs
def main():
    """Parse options, load the samples and draw one scatter plot per pair."""
    usage = '%prog indir pairsFile'
    parser = OptionParser(usage = usage)
    initPlotOptions( parser )
    #parser.add_option('-p', '--percent', dest='percent', type='float', default=0.0, help='Minimum relative size (percentage of total reads) required. Default=%default')
    parser.add_option('-c', '--count', dest='count', type='int', default=1, help='Minimum number of reads required. Default=%default')
    parser.add_option('-r', '--relative', dest='rel', action='store_true', default=False, help='If specified, will draw the relative (frequency) scatter plot instead of reads count')
    options, args = parser.parse_args()
    checkPlotOptions( options, parser )
    indir = args[0]
    pairsFile = args[1]
    pairs = readPairsFile(pairsFile)
    sys.stderr.write("Done readding pairs\n")
    # NOTE(review): third argument to readfiles presumed to be a mode/field
    # selector from immunoseqLib -- confirm against that library.
    samples = readfiles(indir, options.count, 2)
    name2sample = {}
    for sample in samples:
        name2sample[sample.name] = sample
    for pair in pairs:
        scatterPlot(pair, name2sample, options)
        sys.stderr.write("Done plot of pair %s, %s\n" %(pair[0], pair[1]))
if __name__ == '__main__':
main()
| {
"content_hash": "c31ff67e34485de234a129232367e6b1",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 182,
"avg_line_length": 30.748768472906406,
"alnum_prop": 0.5719320730535085,
"repo_name": "ngannguyen/immunoseq",
"id": "f41c96fff83ba4536bb5110653c906a682197098",
"size": "6491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pairwiseScatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "924"
},
{
"name": "Python",
"bytes": "976596"
}
],
"symlink_target": ""
} |
import json

import json_myobj

# A custom object is not JSON-serializable out of the box; the first
# attempt below demonstrates the resulting TypeError.
obj = json_myobj.MyObj('instance value goes here')

print('First attempt')
try:
    print(json.dumps(obj))
except TypeError as err:
    # json.dumps raises TypeError for types it doesn't know how to encode.
    print('ERROR:', err)
def convert_to_builtin_type(obj):
    """json.dumps 'default' hook: represent *obj* as a plain dict.

    The dict records the object's class and module, followed by all of
    its instance attributes.
    """
    print('default(', repr(obj), ')')
    # Class/module metadata first, then the instance attributes.
    encoded = {
        '__class__': obj.__class__.__name__,
        '__module__': obj.__module__,
    }
    encoded.update(obj.__dict__)
    return encoded
print()
print('With default')
# Second attempt: the 'default' hook converts the object to builtins first.
print(json.dumps(obj, default=convert_to_builtin_type))
| {
"content_hash": "37280a6f56475165ba9e4c45abb80de5",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 61,
"avg_line_length": 21.192307692307693,
"alnum_prop": 0.6261343012704175,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "1a811012c7237034e167e88df7b29712006d0db6",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_the_internet/json_dump_default.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
} |
from django.db import connection
from django.http import HttpResponseNotAllowed
from django.template import loader
from django.middleware.locale import LocaleMiddleware
from django.utils.deprecation import MiddlewareMixin
from django.utils.translation.trans_real import parse_accept_lang_header
class HTTPResponseNotAllowedMiddleware(MiddlewareMixin):
    """Render the custom 405 template for HttpResponseNotAllowed responses."""
    def process_response(self, request, response):
        if isinstance(response, HttpResponseNotAllowed):
            # Replace Django's bare 405 body with the project's template.
            response.content = loader.render_to_string(
                "405.html", request=request)
        return response
class LocaleMiddlewareWithTweaks(LocaleMiddleware):
    """
    Overrides LocaleMiddleware from django with:
        Khmer `km` language code in Accept-Language is rewritten to km-kh
    """

    def process_request(self, request):
        accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
        try:
            codes = [code for code, r in parse_accept_lang_header(accept)]
            if 'km' in codes and 'km-kh' not in codes:
                # NOTE(review): str.replace rewrites every 'km' substring in
                # the header (e.g. inside 'kmr'), not just the bare code --
                # confirm whether that edge case matters before tightening.
                request.META['HTTP_ACCEPT_LANGUAGE'] = accept.replace('km',
                                                                     'km-kh')
        except Exception:
            # Narrowed from a bare 'except:': parsing may legitimately fail
            # when i18n is disabled, but SystemExit / KeyboardInterrupt
            # should never be swallowed here.
            pass
        super().process_request(request)
class SqlLogging(MiddlewareMixin):
    """Echo every SQL query Django executed (with timing) after each
    response, but only when stdout is attached to a terminal."""
    def process_response(self, request, response):
        from sys import stdout
        if stdout.isatty():
            for query in connection.queries:
                flat_sql = " ".join(query['sql'].split())
                # Red bracketed timing, bold SQL text (ANSI escapes).
                print("\033[1;31m[%s]\033[0m \033[1m%s\033[0m" % (
                    query['time'], flat_sql))
        return response
class UsernameInResponseHeaderMiddleware(MiddlewareMixin):
    """
    Record the authenticated user (if any) in the `X-KoBoNaUt` HTTP header
    """
    def process_response(self, request, response):
        # request.user is absent when auth middleware did not run; hasattr()
        # swallows exactly the AttributeError the original try/except caught.
        if hasattr(request, 'user') and request.user.is_authenticated:
            response['X-KoBoNaUt'] = request.user.username
        return response
| {
"content_hash": "5b780d6e1f243b2b41e01b7d1bbd14c4",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 34.18032786885246,
"alnum_prop": 0.6316546762589929,
"repo_name": "kobotoolbox/kobocat",
"id": "773392524f5daedbbdac728a781b97fdf55f096a",
"size": "2102",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onadata/libs/utils/middleware.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "146326"
},
{
"name": "Dockerfile",
"bytes": "3965"
},
{
"name": "HTML",
"bytes": "136962"
},
{
"name": "JavaScript",
"bytes": "734122"
},
{
"name": "Less",
"bytes": "19821"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "1264157"
},
{
"name": "Shell",
"bytes": "9858"
}
],
"symlink_target": ""
} |
"""Profile urls."""
from django.conf.urls import url
from .views import Profile, PublicProfile, EditProfile
from django.contrib.auth.decorators import login_required
# NOTE(review): the first two regexes lack a trailing `$`, so `^edit` also
# matches any path beginning with "edit" (e.g. "editor/"), and the username
# pattern matches any leading run of word characters. Pattern order is what
# keeps `edit` from being captured as a username — confirm the anchoring is
# intentional before tightening.
urlpatterns = [
    url(r'^edit', EditProfile.as_view(), name='edit-profile'),
    url(r'^(?P<username>\w+)', PublicProfile.as_view(), name='public_profile'),
    url(r'^$', login_required(Profile.as_view()), name='private_profile')
]
| {
"content_hash": "a26635fc3afc2efa63eba8570dd58d51",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 79,
"avg_line_length": 40.2,
"alnum_prop": 0.7039800995024875,
"repo_name": "pasaunders/django-imager",
"id": "e69aa53bff1309a850104aa805e809f99e4cf458",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_profile/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13779"
},
{
"name": "Python",
"bytes": "62985"
}
],
"symlink_target": ""
} |
import subprocess
import types
import pytest
import salt.client.ssh.shell as shell
from tests.support.mock import patch
@pytest.fixture
def keys(tmp_path):
    """Return the (not yet created) private/public key paths for a test keypair."""
    key_dir = tmp_path / "ssh"
    return types.SimpleNamespace(
        pub_key=key_dir / "testkey.pub",
        priv_key=key_dir / "testkey",
    )
@pytest.mark.skip_on_windows(reason="Windows does not support salt-ssh")
@pytest.mark.skip_if_binaries_missing("ssh", "ssh-keygen", check_all=True)
def test_ssh_shell_key_gen(keys):
    """
    Test ssh key_gen
    """
    # gen_key must create both the private and the public key file (and,
    # implicitly, their parent directory, which the fixture does not create).
    shell.gen_key(str(keys.priv_key))
    assert keys.priv_key.exists()
    assert keys.pub_key.exists()
    # verify there is not a passphrase set on key
    ret = subprocess.check_output(
        ["ssh-keygen", "-f", str(keys.priv_key), "-y"],
        timeout=30,
    )
    # presumably `ssh-keygen -y` would block on a passphrase prompt instead of
    # printing the public key if one were set — confirm against ssh-keygen docs.
    assert ret.decode().startswith("ssh-rsa")
@pytest.mark.skip_on_windows(reason="Windows does not support salt-ssh")
@pytest.mark.skip_if_binaries_missing("ssh", "ssh-keygen", check_all=True)
def test_ssh_shell_exec_cmd(caplog):
    """
    Test executing a command and ensuring the password
    is not in the stdout/stderr logs.
    """
    secret = "12345"
    _shell = shell.Shell(opts={"_ssh_version": (4, 9)}, host="")
    _shell.passwd = secret
    # Same two command invocations as before, expressed as one loop.
    for prog in ("echo", "ls"):
        with patch.object(_shell, "_split_cmd", return_value=[prog, secret]):
            ret = _shell.exec_cmd("{} {}".format(prog, secret))
            assert all(secret not in str(part) for part in ret)
            assert secret not in caplog.text
| {
"content_hash": "61f7cee85c7ba6deba630cf75812e841",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 75,
"avg_line_length": 32.68518518518518,
"alnum_prop": 0.648158640226629,
"repo_name": "saltstack/salt",
"id": "37065c4c187601b2b2a59b93fd8737fe353c888f",
"size": "1765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pytests/unit/client/ssh/test_shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from .models import SupportProject
# Create your views here.
def index( request ):
    """Show the project list, or redirect straight to the single project."""
    support_projects = SupportProject.objects.all()
    if support_projects.count() != 1:
        return render( request, 'support/index.html', { 'sps' : support_projects, } )
    # Exactly one project: skip the index page entirely.
    return HttpResponseRedirect( support_projects.first().project.get_absolute_url() )
| {
"content_hash": "a4a230430fcaaca2cccd79cc6e4fca72",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 76,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.6795454545454546,
"repo_name": "postpdm/ich_bau",
"id": "c77bfcd69447b6d8753b518a3930aaea586d8856",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "support/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "59357"
},
{
"name": "Python",
"bytes": "195507"
}
],
"symlink_target": ""
} |
from ._compat import FileNotFoundError
class SnakePitException(Exception):
    """Base exception class for all snakepit errors."""
class ConfigDoesNotExist(SnakePitException, FileNotFoundError):
    """Raised when the config file is not found."""
class InvalidConfiguration(SnakePitException):
    """Raised when the config file cannot be opened or parsed."""
class RequirementsKeyError(SnakePitException, KeyError):
    """Raised when a required key is not found in the config file."""
class DistributionNotFound(SnakePitException):
    """Raised when a distribution is not found."""
| {
"content_hash": "a72113c98fdebd28167b72062d40e09a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 24.904761904761905,
"alnum_prop": 0.7552581261950286,
"repo_name": "kk6/snake-pit",
"id": "9e11e3f41dfe158b06e064625f1569ba24022df0",
"size": "548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snakepit/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21886"
}
],
"symlink_target": ""
} |
"""A wrapper for subprocess to make calling shell commands easier."""
import os
import logging
import pipes
import signal
import subprocess
import tempfile
import constants
def Popen(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
  """Thin subprocess.Popen wrapper with close_fds and a SIGPIPE reset.

  The preexec_fn restores the default SIGPIPE disposition in the child —
  presumably so pipe writes fail with EPIPE rather than inheriting the
  parent's ignored handler; confirm against the callers' expectations.
  """
  return subprocess.Popen(
      args=args, cwd=cwd, stdout=stdout, stderr=stderr,
      shell=shell, close_fds=True, env=env,
      preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL))
def Call(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
  """Run a command via Popen, wait for it to finish, and return its exit code."""
  child = Popen(args, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd,
                env=env)
  child.communicate()
  return child.wait()
def RunCmd(args, cwd=None):
  """Opens a subprocess to execute a program and returns its return value.

  Args:
    args: A string or a sequence of program arguments. The program to execute is
      the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.

  Returns:
    Return code from the command execution.
  """
  # Log the command (and working directory, when given) before running it.
  logging.info(str(args) + ' ' + (cwd or ''))
  return Call(args, cwd=cwd)
def GetCmdOutput(args, cwd=None, shell=False):
  """Open a subprocess to execute a program and returns its output.

  Args:
    args: A string or a sequence of program arguments. The program to execute is
      the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command.

  Returns:
    Captures and returns the command's stdout.
    Prints the command's stderr to logger (which defaults to stdout).
  """
  # Exit code is discarded; only the captured stdout is returned.
  return GetCmdStatusAndOutput(args, cwd, shell)[1]
def GetCmdStatusAndOutput(args, cwd=None, shell=False):
  """Executes a subprocess and returns its exit code and output.

  NOTE: this is Python 2 code — `basestring` and the `bufsize` keyword of
  tempfile.TemporaryFile do not exist in Python 3.

  Args:
    args: A string or a sequence of program arguments. The program to execute is
      the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command.

  Returns:
    The tuple (exit code, output).
  """
  # String args require shell=True and sequence args require shell=False;
  # reject any other combination up front.
  if isinstance(args, basestring):
    args_repr = args
    if not shell:
      raise Exception('string args must be run with shell=True')
  elif shell:
    raise Exception('array args must be run with shell=False')
  else:
    args_repr = ' '.join(map(pipes.quote, args))
  # Log a shell-like "[host]:cwd> cmd" line describing the invocation.
  s = '[host]'
  if cwd:
    s += ':' + cwd
  s += '> ' + args_repr
  logging.info(s)
  # Unbuffered temp files capture the child's stdout/stderr.
  tmpout = tempfile.TemporaryFile(bufsize=0)
  tmperr = tempfile.TemporaryFile(bufsize=0)
  exit_code = Call(args, cwd=cwd, stdout=tmpout, stderr=tmperr, shell=shell)
  tmperr.seek(0)
  stderr = tmperr.read()
  tmperr.close()
  if stderr:
    logging.critical(stderr)
  tmpout.seek(0)
  stdout = tmpout.read()
  tmpout.close()
  # Only a 4KiB prefix of the output is logged; the full output is returned.
  if len(stdout) > 4096:
    logging.debug('Truncated output:')
    logging.debug(stdout[:4096])
  return (exit_code, stdout)
| {
"content_hash": "09d258232c736a1eef22de2a3947b92c",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 80,
"avg_line_length": 30.392156862745097,
"alnum_prop": 0.6938709677419355,
"repo_name": "cvsuser-chromium/chromium",
"id": "dba399f8193f333ebcc94a880cb540ba83d90976",
"size": "3267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/android/pylib/cmd_helper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "36421"
},
{
"name": "C",
"bytes": "6924841"
},
{
"name": "C++",
"bytes": "179649999"
},
{
"name": "CSS",
"bytes": "812951"
},
{
"name": "Java",
"bytes": "3768838"
},
{
"name": "JavaScript",
"bytes": "8338074"
},
{
"name": "Makefile",
"bytes": "52980"
},
{
"name": "Objective-C",
"bytes": "819293"
},
{
"name": "Objective-C++",
"bytes": "6453781"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "Perl",
"bytes": "17897"
},
{
"name": "Python",
"bytes": "5640877"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "648699"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "15926"
}
],
"symlink_target": ""
} |
"""Stack implementation using linked list
"""
class Node():
    """Singly linked list node holding an item and a link to the next node."""

    def __init__(self, item=None, next=None):
        # Defaulted parameters keep the original no-argument construction
        # (Node()) working while also allowing Node(item, next) directly.
        self.item = item
        self.next = next
class Stack():
    """LIFO stack backed by a singly linked list of Node objects."""

    def __init__(self):
        self.first = None

    def push(self, item):
        """Place `item` on top of the stack."""
        top = Node()
        top.item = item
        top.next = self.first
        self.first = top

    def pop(self):
        """Remove and return the top Node, or None when the stack is empty."""
        top = self.first
        if top is None:
            return None
        self.first = top.next
        return top
def main():
    """Smoke test: push three items and verify LIFO pop order."""
    stack = Stack()
    for value in (3, 2, 6):
        stack.push(value)
    for expected in (6, 2, 3):
        assert stack.pop().item == expected


if __name__ == "__main__":
    main()
"content_hash": "28260274e0a19307712e732641bad5c1",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 41,
"avg_line_length": 15.475,
"alnum_prop": 0.6252019386106623,
"repo_name": "Yasik/algorithms-collection",
"id": "5c016f14fac0550c2d34c0ab1e5ed8ec467f17f9",
"size": "638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/linked_list_stack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "101835"
},
{
"name": "Python",
"bytes": "3539"
}
],
"symlink_target": ""
} |
import absl.testing
import test_util
model_path = "https://tfhub.dev/tulasiram58827/lite-model/craft-text-detector/dr/1?lite-format=tflite"
# Failure: Resize lowering does not handle inferred dynamic shapes. Furthermore, the entire model
# requires dynamic shape support.
class CraftTextTest(test_util.TFLiteModelTest):
  """Compile-and-execute test for the CRAFT text-detector TFLite model."""
  def __init__(self, *args, **kwargs):
    super(CraftTextTest, self).__init__(model_path, *args, **kwargs)
  # NOTE(review): this override only forwards to the base implementation —
  # it appears redundant unless kept as a hook for model-specific tolerances.
  def compare_results(self, iree_results, tflite_results, details):
    super(CraftTextTest, self).compare_results(iree_results, tflite_results, details)
  def test_compile_tflite(self):
    self.compile_and_execute()
if __name__ == '__main__':
  # NOTE(review): only `absl.testing` is imported above; accessing
  # `absl.testing.absltest` presumably works because another imported module
  # pulls in the submodule — confirm, or import absltest explicitly.
  absl.testing.absltest.main()
| {
"content_hash": "fc7974a1686703f0d068f7187e9bb8d3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 102,
"avg_line_length": 33.857142857142854,
"alnum_prop": 0.7355836849507735,
"repo_name": "iree-org/iree-samples",
"id": "d533bacf8a368fd016190e9a90e6c731fa18aa4e",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tflitehub/craft_text_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17305"
},
{
"name": "C++",
"bytes": "35644"
},
{
"name": "CMake",
"bytes": "8560"
},
{
"name": "MLIR",
"bytes": "199"
},
{
"name": "Python",
"bytes": "129624"
},
{
"name": "Shell",
"bytes": "991"
}
],
"symlink_target": ""
} |
"""Class to transform an subgraph into another.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from functools import partial
from six import iteritems
from six import iterkeys
from six import string_types
from six import StringIO
from tensorflow.contrib.graph_editor import reroute
from tensorflow.contrib.graph_editor import select
from tensorflow.contrib.graph_editor import subgraph
from tensorflow.contrib.graph_editor import util
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.platform import tf_logging as logging
# Public API of this module; everything else is an implementation detail.
__all__ = [
    "replace_t_with_placeholder_handler",
    "keep_t_if_possible_handler",
    "assign_renamed_collections_handler",
    "transform_op_if_inside_handler",
    "copy_op_handler",
    "Transformer",
    "TransformerInfo",
    "copy",
    "copy_with_input_replacements",
    "graph_replace",
]
def replace_t_with_placeholder_handler(info, t):
  """Transform a tensor into a placeholder tensor.

  This handler is typically used to transform a subgraph input tensor into a
  placeholder.

  Args:
    info: Transform._TmpInfo instance.
    t: tensor whose input must be transformed into a place holder.
  Returns:
    The tensor generated by the newly created place holder.
  """
  # The placeholder must live in the destination graph.
  with info.graph_.as_default():
    placeholder_t = util.make_placeholder_from_tensor(t, scope=info.scope_)
  return placeholder_t
def keep_t_if_possible_handler(info, t):
  """Transform a tensor into itself (identity) if possible.

  This handler transform a tensor into itself if the source and destination
  graph are the same. Otherwise it will create a placeholder.
  This handler is typically used to transform a hidden input tensors.

  Args:
    info: Transform._TmpInfo instance.
    t: tensor whose input must be transformed into a place holder.
  Returns:
    The tensor generated by the newly created place holder.
  """
  # Cross-graph references are impossible, so fall back to a placeholder.
  if info.graph is not info.graph_:
    return replace_t_with_placeholder_handler(info, t)
  return t
def assign_renamed_collections_handler(info, elem, elem_):
  """Add the transformed elem to the (renamed) collections of elem.

  A collection is renamed only if is not a known key, as described in
  `tf.GraphKeys`.

  Args:
    info: Transform._TmpInfo instance.
    elem: the original element (`tf.Tensor` or `tf.Operation`)
    elem_: the transformed element
  """
  known_collection_names = util.get_predefined_collection_names()
  for name, collection in iteritems(info.collections):
    if elem in collection:
      # Standard collection keys keep their name; custom ones are re-scoped.
      if name in known_collection_names:
        info.graph_.add_to_collection(name, elem_)
      else:
        info.graph_.add_to_collection(info.new_name(name), elem_)
def transform_op_if_inside_handler(info, op, keep_if_possible=True):
  """Transform an optional op only if it is inside the subgraph.

  This handler is typically use to handle original op: it is fine to keep them
  if they are inside the subgraph, otherwise they are just ignored.

  Args:
    info: Transform._TmpInfo instance.
    op: the optional op to transform (or ignore).
    keep_if_possible: re-attach to the original op if possible, that is,
      if the source graph and the destination graph are the same.
  Returns:
    The transformed op or None.
  """
  if op in info.sgv.ops:
    return info.transformed_ops[op]
  # Outside the subgraph: keep the original only for in-graph transforms.
  if keep_if_possible and info.graph is info.graph_:
    return op
  return None
def copy_op_handler(info, op, new_inputs, copy_shape=True, nodedef_fn=None):
  """Copy a `tf.Operation`.

  Args:
    info: Transform._TmpInfo instance.
    op: the `tf.Operation` to be copied.
    new_inputs: The new inputs for this op.
    copy_shape: also copy the shape of the tensor
    nodedef_fn: If provided, a function that will be run on the NodeDef
      and should return a mutated NodeDef before a new Operation is created.
      This is useful as certain features cannot be set on the Operation and
      must be modified in NodeDef.

  Returns:
    A `(op, op_outputs)` tuple containing the transformed op and its outputs.
  """
  # The `new_inputs` was added to this function. For compatibility reason,
  # let's raise an error if `new_inputs` is a boolean.
  if isinstance(new_inputs, bool):
    raise TypeError("the `new_inputs` argument must be an iterable.")
  # pylint: disable=protected-access
  # Clone the node def:
  node_def_ = deepcopy(op.node_def)
  # Transform name:
  name_ = info.new_name(op.name)
  name_ = info.graph_.unique_name(name_)
  node_def_.name = name_
  # Mutate NodeDef if requested:
  if nodedef_fn is not None:
    node_def_ = nodedef_fn(node_def_)
  # Copy the other inputs needed for initialization
  output_types_ = op._output_types[:]
  input_types_ = op._input_types[:]
  # Make a copy of the op_def too.
  # Its unique to every _type_ of Operation.
  op_def_ = deepcopy(op.op_def)
  # Initialize a new Operation instance; control inputs are connected later
  # (see Transformer._connect_control_inputs), hence the empty list here.
  op_ = tf_ops.Operation(node_def_, info.graph_, new_inputs, output_types_,
                         [], input_types_, None, op_def_)
  # copy the shape over
  if copy_shape:
    for t, t_ in zip(op.outputs, op_.outputs):
      t_.set_shape(t.get_shape())
  # Original op cannot be finalised here yet. Because some ops require this
  # attribute to exist, we will create a dummy original_op first and then
  # later finalise it with the actual original_op when all the ops have
  # been copied.
  # TODO(fkp): Stop worrying about _original_op and remove this code?
  if op._original_op:
    op_._original_op = op._original_op
  # Add op to the graph
  info.graph_._add_op(op_)
  return op_, op_.outputs
class TransformerInfo(object):
  """Contains information about the result of a transform operation."""
  def __init__(self, info):
    """Constructor.

    Args:
      info: an instance of Transformer._TmpInfo containing various internal
        information about the transform operation.
    """
    self._graph = info.graph
    self._scope = info.scope
    self._graph_ = info.graph_
    self._scope_ = info.scope_
    # Mappings from original ops/tensors to their transformed counterparts.
    self._transformed_ops = info.transformed_ops
    self._transformed_ts = info.transformed_ts
  def _get_transformed_map(self, top):
    """Return the correct container depending on the type of `top`."""
    if isinstance(top, tf_ops.Operation):
      return self._transformed_ops
    elif isinstance(top, tf_ops.Tensor):
      return self._transformed_ts
    else:
      raise TypeError(
          "Expected a tf.Tensor or a tf.Operation, got a {}".format(
              type(top)))
  def _transformed_elem(self, original_top, missing_fn=None):
    """Return the transformed op/tensor corresponding to the original one.

    Args:
      original_top: the original tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the transformed tensor/operation (or None if no match is found).
    """
    transformed_map = self._get_transformed_map(original_top)
    # A string key is resolved by scanning the map for a matching name.
    if isinstance(original_top, string_types):
      for original, transformed in iteritems(transformed_map):
        if original.name == original_top:
          return transformed
      return None if missing_fn is None else missing_fn(original_top)
    else:
      if original_top not in transformed_map:
        return None if missing_fn is None else missing_fn(original_top)
      return transformed_map[original_top]
  def _original_elem(self, transformed_top, missing_fn=None):
    """Return the original op/tensor corresponding to the transformed one.

    Args:
      transformed_top: the transformed tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the original tensor/operation (or None if no match is found).
    """
    transformed_map = self._get_transformed_map(transformed_top)
    if isinstance(transformed_top, string_types):
      finder = lambda transformed: transformed.name == transformed_top
    else:
      finder = lambda transformed: transformed == transformed_top
    # Reverse lookup: scan values for a match and return the key.
    for original, transformed in iteritems(transformed_map):
      if finder(transformed):
        return original
    return None if missing_fn is None else missing_fn(transformed_top)
  def transformed(self, original, missing_fn=None):
    """Return the transformed op/tensor corresponding to the original one.

    Note that the output of this function mimics the hierarchy
    of its input argument `original`.
    Given an iterable, it returns a list. Given an operation or a tensor,
    it will return an operation or a tensor.

    Args:
      original: the original tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the transformed tensor/operation (or None if no match is found).
    """
    transformed_elem = partial(self._transformed_elem, missing_fn=missing_fn)
    return util.transform_tree(original, transformed_elem)
  def original(self, transformed, missing_fn=None):
    """Return the original op/tensor corresponding to the transformed one.

    Note that the output of this function mimics the hierarchy
    of its input argument `transformed`.
    Given an iterable, it returns a list. Given an operation or a tensor,
    it will return an operation or a tensor.

    Args:
      transformed: the transformed tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the original tensor/operation (or None if no match is found).
    """
    original_elem = partial(self._original_elem, missing_fn=missing_fn)
    return util.transform_tree(transformed, original_elem)
  def __str__(self):
    # Human-readable summary of the transform: graphs, scopes, op mapping.
    res = StringIO()
    print("Transform result info:", file=res)
    if self._graph == self._graph_:
      in_place_str = "" if self._scope_ else " IN-PLACE"
      print("  Within graph[{}]{}".format(
          id(self._graph), in_place_str), file=res)
    else:
      print("  graph[{}] => graph[{}]".format(
          id(self._graph), id(self._graph_)), file=res)
    if self._scope:
      print("  Relative to source scope: {}".format(self._scope), file=res)
    if self._scope_:
      print("  Scope destination: {}".format(self._scope_), file=res)
    print("Operations mapping:", file=res)
    for op, op_ in iteritems(self._transformed_ops):
      print("  {} => {}".format(op.name, op_.name), file=res)
    return res.getvalue()
class _TmpInfo(object):
  """Transformer temporary data.

  An instance of this class holds all the information relevant to a call
  to a transformer instance (that is, a call to __call__). An instance
  is created for the life-time of the __call__ function and is passed as
  argument to the handlers.
  """
  def __init__(self, sgv, dst_graph, dst_scope, src_scope):
    self.sgv = sgv
    self.sgv_inputs_set = frozenset(sgv.inputs)
    self.ops = frozenset(sgv.ops)
    self.control_outputs = util.ControlOutputs(sgv.graph)
    self.graph = sgv.graph
    self.scope = src_scope
    self.graph_ = dst_graph
    self.scope_ = dst_scope
    # Mappings from original ops/tensors to their transformed counterparts,
    # filled in as the transform progresses.
    self.transformed_ops = {}
    self.transformed_ts = {}
    self.collections = dict((key, self.graph.get_collection(key))
                            for key in self.graph.get_all_collection_keys())
    self.cyclic_ops = []
    self.transform_original_op_handler = transform_op_if_inside_handler
    # The graph is transformed op by op, in the same order the original ops
    # were created. However, this is sometimes not possible due to cycles
    # (i.e. while loops). So when the transformer creates a new op whose
    # inputs do not exist yet, temporary placeholders are created and stored
    # in this `tmp_cyclic_ts` container. During a second pass,
    # those temporary tensors are replaced by the proper transformed tensors
    # (see the function `_finalize_cycles`).
    self.tmp_cyclic_ts = []
  def new_name(self, name):
    """Compute a destination name from a source name.

    Args:
      name: the name to be "transformed".
    Returns:
      The transformed name.
    Raises:
      ValueError: if the source scope is used (that is, not an empty string)
        and the source name does not belong to the source scope.
    """
    scope = self.scope
    if not name.startswith(scope):
      raise ValueError("{} does not belong to source scope: {}.".format(
          name, scope))
    # Replace the source scope prefix with the destination scope.
    rel_name = name[len(scope):]
    name_ = self.scope_ + rel_name
    return name_
class Transformer(object):
  """Transform a subgraph into another one.

  By default, the constructor create a transform which copy a subgraph and
  replaces inputs with placeholders. This behavior can be modified by changing
  the handlers.
  """
  def __init__(self):
    """Transformer constructor.

    The following members can be modified:
    transform_op_handler: handle the transformation of a `tf.Operation`.
      This handler defaults to a simple copy.
    assign_collections_handler: handle the assignment of collections.
      This handler defaults to assigning new collections created under the
      given name-scope.
    transform_external_input_handler: handle the transform of the inputs to
      the given subgraph. This handler defaults to creating placeholders
      instead of the ops just before the input tensors of the subgraph.
    transform_external_hidden_input_handler: handle the transform of the
      hidden inputs of the subgraph, that is, the inputs which are not listed
      in sgv.inputs. This handler defaults to a transform which keep the same
      input if the source and destination graphs are the same, otherwise
      use placeholders.
    transform_original_op_handler: handle the transform of original_op. This
      handler defaults to transforming original_op only if they are in the
      subgraph, otherwise they are ignored.
    """
    # handlers
    self.transform_op_handler = copy_op_handler
    self.transform_control_input_handler = transform_op_if_inside_handler
    self.assign_collections_handler = assign_renamed_collections_handler
    self.transform_external_input_handler = replace_t_with_placeholder_handler
    self.transform_external_hidden_input_handler = keep_t_if_possible_handler
    self.transform_original_op_handler = transform_op_if_inside_handler
  def __call__(self,
               sgv,
               dst_graph,
               dst_scope,
               src_scope="",
               reuse_dst_scope=False):
    """Execute the transformation.

    Args:
      sgv: the source subgraph-view.
      dst_graph: the destination graph.
      dst_scope: the destination scope.
      src_scope: the source scope, which specify the path from which the
        relative path of the transformed nodes are computed. For instance, if
        src_scope is a/ and dst_scoped is b/, then the node a/x/y will have a
        relative path of x/y and will be transformed into b/x/y.
      reuse_dst_scope: if True the dst_scope is re-used if it already exists.
        Otherwise, the scope is given a unique name based on the one given
        by appending an underscore followed by a digit (default).
    Returns:
      A tuple `(sgv, info)` where:
        `sgv` is the transformed subgraph view;
        `info` is an instance of TransformerInfo containing
        information about the transform, including mapping between
        original and transformed tensors and operations.
    Raises:
      ValueError: if the arguments are invalid.
    """
    sgv = subgraph.make_view(sgv)
    if not isinstance(dst_graph, tf_ops.Graph):
      raise TypeError("Expected a tf.Graph, got: {}".format(type(dst_graph)))
    src_scope = util.scope_finalize(src_scope)
    dst_scope = util.scope_finalize(dst_scope)
    # Potentially create new scope if reuse_dst_scope is False
    if dst_scope and not reuse_dst_scope:
      dst_scope = util.scope_finalize(dst_graph.unique_name(dst_scope[:-1]))
    # Create temporary info used during this transform call
    info = _TmpInfo(sgv, dst_graph, dst_scope, src_scope)
    # Three passes: copy ops, patch up cyclic tensors, connect control deps.
    self._copy_ops(info)
    self._finalize_cycles(info)
    self._connect_control_inputs(info)
    # Compute information about the transformation
    res_info = TransformerInfo(info)
    sgv_ = self._transform_sgv(info, sgv)
    return sgv_, res_info
  def _copy_ops(self, info):
    """Copy ops without connecting them."""
    # Preserve original creation order by sorting on the op id.
    sorted_ops = sorted(info.sgv.ops, key=lambda op: op._id)  # pylint: disable=protected-access
    for op in sorted_ops:
      new_inputs = [self._transformed_t(info, t, op) for t in op.inputs]
      op_, op_outputs_ = self.transform_op_handler(info, op, new_inputs)
      if op is op_:
        raise ValueError("In-place transformation not allowed.")
      # Process op.
      info.transformed_ops[op] = op_
      self.assign_collections_handler(info, op, op_)
      # Process output tensors.
      for op_output, op_output_ in zip(op.outputs, op_outputs_):
        info.transformed_ts[op_output] = op_output_
        self.assign_collections_handler(info, op_output, op_output_)
  def _finalize_cycles(self, info):
    """Reconnects the cyclic tensors."""
    for t, tmp_t_, consumer_op in info.tmp_cyclic_ts:
      if t not in info.transformed_ts:
        raise ValueError("The tensor {} should be transformed by now.".format(
            t.name))
      if consumer_op not in info.transformed_ops:
        raise ValueError("The op {} should be transformed by now.".format(
            consumer_op.name))
      t_ = info.transformed_ts[t]
      consumer_op_ = info.transformed_ops[consumer_op]
      t_index_ = list(consumer_op_.inputs).index(tmp_t_)
      # Swap the temporary placeholder for the now-available transformed tensor.
      consumer_op_._update_input(t_index_, t_, update_dtype=False)  # pylint: disable=protected-access
  def _connect_control_inputs(self, info):
    """Connect the previously copied ops."""
    for op in info.sgv.ops:
      logging.debug("Connecting control inputs of op: %s", op.name)
      op_ = info.transformed_ops[op]
      # Finalize original op.
      # TODO(fkp): Stop worrying about _original_op and remove this code?
      # pylint: disable=protected-access
      if op._original_op:
        original_op = self.transform_original_op_handler(info, op._original_op)
        if original_op is None:
          logging.debug("Could not find original op for: %s", op_.name)
        else:
          op_._original_op = original_op
      # pylint: enable=protected-access
      # Finalize control inputs:
      control_inputs_ = [self.transform_control_input_handler(info, ci)
                         for ci in op.control_inputs]
      control_inputs_ = [ci for ci in control_inputs_ if ci is not None]
      reroute.add_control_inputs(op_, control_inputs_)
  def _transform_sgv(self, info, sgv):
    """Transform a subgraph view.

    For convenience, a transform operation returns a subgraph view of the
    transformed graph.

    Args:
      info: Temporary information for this transorfm call.
      sgv: the subgraph to be transformed.
    Returns:
      The transformed subgraph.
    """
    ops_ = [op_ for _, op_ in iteritems(info.transformed_ops)]
    sgv_ = subgraph.SubGraphView(ops_)
    sgv_inputs_ = sgv_.inputs
    sgv_outputs_ = sgv_.outputs
    # re-order inputs
    input_map_ = []
    for input_t in sgv.inputs:
      if input_t not in info.transformed_ts:
        continue
      input_t_ = info.transformed_ts[input_t]
      if input_t_ not in sgv_inputs_:
        continue
      input_t_index_ = sgv_.input_index(input_t_)
      input_map_.append(input_t_index_)
    # re-order outputs
    output_map_ = []
    for output_t in sgv.outputs:
      if output_t not in info.transformed_ts:
        continue
      output_t_ = info.transformed_ts[output_t]
      if output_t_ not in sgv_outputs_:
        continue
      output_t_index_ = sgv_.output_index(output_t_)
      output_map_.append(output_t_index_)
    return sgv_.remap(input_map_, output_map_)
  def _transformed_t(self, info, t, consumer_op):
    """Return the transformed tensor of `t`."""
    if t in info.transformed_ts:
      # If op is in the subgraph, just return its transformed counterpart.
      return info.transformed_ts[t]
    if t in info.sgv_inputs_set:
      # `t` is an input of the subgraph.
      return self.transform_external_input_handler(info, t)
    elif t.op in info.ops:
      # `t` is an internal tensor but is not transformed yet because it
      # belongs to a graph cycle.
      logging.debug("Cyclic tensor: t.name = %s", t.name)
      # Try to find an existing tensor we can use for now,
      # otherwise create one. We'll rewire this later.
      if consumer_op.type == "Merge":
        first_input = consumer_op.inputs[0]
        tmp_t_ = self._transformed_t(info, first_input, consumer_op)
      elif t.op.type == "Enter":
        enter_input = t.op.inputs[0]
        tmp_t_ = self._transformed_t(info, enter_input, consumer_op)
      else:
        with info.graph_.as_default():
          tmp_t_ = util.make_placeholder_from_tensor(t, scope=info.scope_,
                                                     prefix="geph_tmp")
        logging.debug("Created temporary placeholder: %s.", tmp_t_.name)
      # Register as temporary and return.
      info.tmp_cyclic_ts.append((t, tmp_t_, consumer_op))
      return tmp_t_
    else:
      # `t` is a hidden input of the subgraph.
      return self.transform_external_hidden_input_handler(info, t)
def copy(sgv, dst_graph=None, dst_scope="", src_scope="",
         reuse_dst_scope=False):
  """Copy a subgraph, possibly into another graph.

  Args:
    sgv: the source subgraph-view. This argument is converted to a subgraph
      using the same rules as the function subgraph.make_view.
    dst_graph: the destination graph. Defaults to the source graph.
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A tuple `(sgv, info)` where:
      `sgv` is the transformed subgraph view;
      `info` is an instance of TransformerInfo containing
      information about the transform, including mapping between
      original and transformed tensors and operations.
  Raises:
    TypeError: if `dst_graph` is not a `tf.Graph`.
    StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
  """
  src_sgv = subgraph.make_view(sgv)
  # Default to copying within the source graph itself.
  target_graph = src_sgv.graph if dst_graph is None else dst_graph
  if not isinstance(target_graph, tf_ops.Graph):
    raise TypeError("Expected a tf.Graph, got: {}".format(type(target_graph)))
  transformer = Transformer()
  return transformer(
      src_sgv, target_graph, dst_scope, src_scope,
      reuse_dst_scope=reuse_dst_scope)
def copy_with_input_replacements(sgv, replacement_ts,
                                 dst_graph=None, dst_scope="", src_scope="",
                                 reuse_dst_scope=False):
  """Copy a subgraph, replacing some of its inputs.

  Note a replacement only happens if the tensor to be replaced
  is an input of the given subgraph. The inputs of a subgraph can
  be queried using sgv.inputs.

  Args:
    sgv: the source subgraph-view. This argument is converted to a subgraph
      using the same rules as the function subgraph.make_view.
    replacement_ts: dictionary mapping from original tensors to the
      replaced one.
    dst_graph: the destination graph. Defaults to the source graph.
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A tuple `(sgv, info)` where:
      `sgv` is the transformed subgraph view;
      `info` is an instance of TransformerInfo containing
      information about the transform, including mapping between
      original and transformed tensors and operations.
  Raises:
    TypeError: if dst_graph is not a tf.Graph.
    StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
  """
  src_sgv = subgraph.make_view(sgv)
  target_graph = src_sgv.graph if dst_graph is None else dst_graph
  if not isinstance(target_graph, tf_ops.Graph):
    raise TypeError("Expected a tf.Graph, got: {}".format(type(target_graph)))
  transformer = Transformer()

  def _substitute_or_keep(info, t):
    # Use the caller-provided replacement when one exists for `t`;
    # otherwise fall back to the default keep-original behavior.
    try:
      return replacement_ts[t]
    except KeyError:
      return keep_t_if_possible_handler(info, t)

  transformer.transform_external_input_handler = _substitute_or_keep
  return transformer(
      src_sgv, target_graph, dst_scope, src_scope,
      reuse_dst_scope=reuse_dst_scope)
def _add_control_flow_ops(ops, control_ios):
"""Complete `ops` so that the transformed graph is valid.
Partially copying a graph can lead to a malformed graph. For instance,
copying half of a while construct is likely to result in an invalid graph.
This function attempts to add missing ops so that the transformation result
in a valid graph.
Args:
ops: list of ops (modifed in-place).
control_ios: object created by a call to `util.ControlOutputs`.
"""
# Find while contexts.
control_flow_contexts = set()
for op in ops:
cfc = op._control_flow_context # pylint: disable=protected-access
if cfc:
control_flow_contexts.add(cfc)
# Find new ops.
new_ops = []
for cfc in control_flow_contexts:
if cfc.IsWhileContext():
new_ops += select.get_walks_intersection_ops(
[enter_t.op for enter_t in cfc.loop_enters],
[exit_t.op for exit_t in cfc.loop_exits],
control_ios=control_ios)
# Add new ops.
new_ops_set = set(new_ops)
ops_set = frozenset(ops)
for op in new_ops_set:
if op not in ops_set:
ops.append(op)
def graph_replace(target_ts, replacement_ts, dst_scope="",
                  src_scope="", reuse_dst_scope=False):
  """Create a new graph which computes the targets from the replaced Tensors.

  Args:
    target_ts: a single tf.Tensor or an iterable of tf.Tensor.
    replacement_ts: dictionary mapping from original tensors to replaced tensors
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A single tf.Tensor or a list of target tf.Tensor, depending on
    the type of the input argument `target_ts`.
    The returned tensors are recomputed using the tensors from replacement_ts.
  Raises:
    ValueError: if the targets are not connected to replacement_ts.
  """
  # Identify operations in the graph that will change.
  # Start forward walk at Tensors that will be replaced, and
  # backward walk at the target output Tensors.
  flatten_target_ts = util.flatten_tree(target_ts)
  # Construct the forward control dependencies edges so that
  # the get_walks_intersection_ops can also traverse the
  # control dependencies.
  # NOTE(review): `(tf_ops.Tensor)` is not a tuple (missing trailing comma);
  # this relies on get_unique_graph accepting a bare class -- confirm.
  graph = util.get_unique_graph(flatten_target_ts, check_types=(tf_ops.Tensor))
  control_ios = util.ControlOutputs(graph)
  ops = select.get_walks_intersection_ops(list(iterkeys(replacement_ts)),
                                          flatten_target_ts,
                                          control_ios=control_ios)
  if not ops:
    raise ValueError("Targets and replacements are not connected!")
  # Complete ops to avoid malformed control flow.
  # TODO(fkp): Consider moving this function deeper (in the transformer?).
  _add_control_flow_ops(ops, control_ios)
  # Create a copy of the relevant subgraph
  unused_sgv_, info = copy_with_input_replacements(
      ops, replacement_ts, None, dst_scope, src_scope, reuse_dst_scope)
  # Return the transformed targets but keep the original if the transformed
  # counterpart cannot be found
  missing_fn = lambda original_t: original_t
  return info.transformed(target_ts, missing_fn)
| {
"content_hash": "4cf451e01799a625bf40968c031a5b5c",
"timestamp": "",
"source": "github",
"line_count": 743,
"max_line_length": 102,
"avg_line_length": 38.1507402422611,
"alnum_prop": 0.6809426374091583,
"repo_name": "nburn42/tensorflow",
"id": "592d37b432ee605d74162e0b8ec6ccdf426c45d1",
"size": "29035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/graph_editor/transform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "341132"
},
{
"name": "C++",
"bytes": "39824558"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "590137"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33704964"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "426212"
}
],
"symlink_target": ""
} |
"""
"Strong star normal form"
From "The complexity of regular-(like)-expressions"
http://www.springerlink.com/content/978-3-642-14454-7/#section=754924&page=3&locus=34
"""
## Many(Seq(Optional(Lit('x')), Optional(Lit('y')))).black()
#. ((x|y))*
def normalize(re):
    """Rewrite regex AST `re` into strong star normal form."""
    return re.black()
class Lit:
    """A single literal character; never matches the empty string."""

    nullable = False

    def __init__(self, c):
        self.c = c

    def __repr__(self):
        return self.c

    def white(self):
        # A literal has no nullable parts to strip.
        return self

    def black(self):
        # Already in normal form.
        return self
class Alt:
    """Alternation (re1|re2); nullable iff either branch is."""

    def __init__(self, re1, re2):
        self.re1 = re1
        self.re2 = re2
        self.nullable = re1.nullable or re2.nullable

    def __repr__(self):
        return '({0!r}|{1!r})'.format(self.re1, self.re2)

    def white(self):
        # Strip nullability branch-wise.
        return Alt(self.re1.white(), self.re2.white())

    def black(self):
        # Normalize branch-wise.
        return Alt(self.re1.black(), self.re2.black())
class Optional:
    """Zero-or-one occurrence (re?); always nullable."""

    nullable = True

    def __init__(self, re):
        self.re = re

    def __repr__(self):
        return '({0!r})?'.format(self.re)

    def white(self):
        # Under a star, "?" is redundant: drop the Optional wrapper.
        return self.re.white()

    def black(self):
        inner = self.re.black()
        if self.re.nullable:
            # A nullable body already matches empty; the "?" is redundant.
            return inner
        return Optional(inner)
class Many:
    """Kleene star (re*); always nullable."""

    nullable = True

    def __init__(self, re):
        self.re = re

    def __repr__(self):
        return '({0!r})*'.format(self.re)

    def white(self):
        # Nested stars collapse: strip nullability of the body.
        return self.re.white()

    def black(self):
        # Normalize the body, then strip its nullable parts -- the star
        # itself already accounts for the empty match.
        return Many(self.re.black().white())
class Seq:
    """Concatenation re1 re2; nullable iff both parts are."""

    def __init__(self, re1, re2):
        self.re1 = re1
        self.re2 = re2
        self.nullable = re1.nullable and re2.nullable

    def __repr__(self):
        return '{0!r}{1!r}'.format(self.re1, self.re2)

    def white(self):
        if self.nullable:
            # Both parts can match empty, so under a star the sequence
            # degenerates to an alternation of the stripped parts.
            return Alt(self.re1.white(), self.re2.white())
        return self

    def black(self):
        return Seq(self.re1.black(), self.re2.black())
| {
"content_hash": "56d60aa0c43715a088eee9930e7ca84b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 85,
"avg_line_length": 27.984375,
"alnum_prop": 0.5745393634840871,
"repo_name": "JaDogg/__py_playground",
"id": "31fdc0fd11f419f2538acfeef1d5373f41acf149",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reference/sketchbook/regex/normalize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "951976"
},
{
"name": "Assembly",
"bytes": "24809"
},
{
"name": "C",
"bytes": "205198"
},
{
"name": "C#",
"bytes": "48314"
},
{
"name": "C++",
"bytes": "168261"
},
{
"name": "CSS",
"bytes": "2582"
},
{
"name": "Emacs Lisp",
"bytes": "1041"
},
{
"name": "Erlang",
"bytes": "2303"
},
{
"name": "GAP",
"bytes": "718192"
},
{
"name": "HTML",
"bytes": "6799330"
},
{
"name": "Haskell",
"bytes": "2061"
},
{
"name": "Java",
"bytes": "1063759"
},
{
"name": "JavaScript",
"bytes": "12710"
},
{
"name": "Lua",
"bytes": "278"
},
{
"name": "M",
"bytes": "5739"
},
{
"name": "Makefile",
"bytes": "5903"
},
{
"name": "Matlab",
"bytes": "23"
},
{
"name": "Objective-C",
"bytes": "134542"
},
{
"name": "PHP",
"bytes": "5958"
},
{
"name": "Pascal",
"bytes": "40255"
},
{
"name": "Python",
"bytes": "2197399"
},
{
"name": "Ruby",
"bytes": "2367"
},
{
"name": "Scheme",
"bytes": "52618"
},
{
"name": "Shell",
"bytes": "5743"
},
{
"name": "Swift",
"bytes": "11374"
},
{
"name": "TeX",
"bytes": "62560"
},
{
"name": "VHDL",
"bytes": "401678"
},
{
"name": "Visual Basic",
"bytes": "4107"
},
{
"name": "Yacc",
"bytes": "99342"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.db.models import (CharField, TextField,
BooleanField, ForeignKey,
SmallIntegerField)
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from deck.models import Event, Proposal, Vote, Jury
# Baseline keyword arguments for building model instances in the tests
# below. `author_id` 1 presumably matches the first user in the
# `user.json` fixture -- confirm against that fixture.
EVENT_DATA = {
    'title': 'RuPy',
    'slug': 'rupy',
    'description': 'A really good event.',
    'author_id': 1,
    'is_published': False,
}

PROPOSAL_DATA = {
    'title': 'Python For Zombies',
    'slug': 'python-for-zombies',
    'description': 'Brain...',
    'author_id': 1
}

# Not referenced in this module's visible tests; presumably shared with
# other test modules -- verify before removing.
ANOTHER_PROPOSAL_DATA = {
    'title': 'A Python 3 Metaprogramming Tutorial',
    'slug': 'python-3-metaprogramming',
    'description': 'An advanced tutorial on Python 3 and Metaprogramming',
    'author_id': 1
}
class EventModelIntegrityTest(TestCase):
    """Schema-level checks on the Event model: field types, constraints,
    defaults, verbose names and FK relations.

    NOTE(review): uses pre-Django-1.10 APIs (`_meta.get_all_field_names()`,
    `field.rel`) -- this module targets an old Django version.
    """

    def setUp(self):
        # Map field name -> field instance for direct attribute checks.
        self.fields = {
            field.name: field for field in Event._meta.fields
        }

    def test_assert_event_should_have_a_verbose_name(self):
        self.assertEquals(_('Event'), Event._meta.verbose_name)

    def test_assert_event_should_have_a_verbose_name_plural(self):
        self.assertEquals(_('Events'), Event._meta.verbose_name_plural)

    def test_assert_event_should_have_a_title(self):
        self.assertIn('title', Event._meta.get_all_field_names())

    def test_assert_event_title_should_be_a_CharField(self):
        self.assertIsInstance(self.fields['title'], CharField)

    def test_assert_event_title_should_be_required(self):
        self.assertEquals(False, self.fields['title'].null)
        self.assertEquals(False, self.fields['title'].blank)

    def test_assert_event_title_should_have_at_most_200_characters(self):
        self.assertEquals(200, self.fields['title'].max_length)

    def test_assert_event_should_have_a_description(self):
        self.assertIn('description', Event._meta.get_all_field_names())

    def test_assert_event_description_should_be_a_TextField(self):
        self.assertIsInstance(self.fields['description'], TextField)

    def test_assert_event_description_should_be_required(self):
        self.assertEquals(False, self.fields['description'].null)
        self.assertEquals(False, self.fields['description'].blank)

    def test_assert_event_description_should_have_at_most_10000_characters(self):
        self.assertEquals(10000, self.fields['description'].max_length)

    def test_assert_event_should_allow_public_voting(self):
        self.assertIn('allow_public_voting', Event._meta.get_all_field_names())

    def test_assert_event_allow_public_voting_should_be_a_BooleanField(self):
        self.assertIsInstance(self.fields['allow_public_voting'], BooleanField)

    def test_assert_event_allow_public_voting_should_be_True_as_default(self):
        self.assertEquals(True, self.fields['allow_public_voting'].default)

    def test_assert_event_should_have_a_author(self):
        self.assertIn('author', Event._meta.get_all_field_names())

    def test_assert_event_author_should_be_an_User(self):
        self.assertEquals(User, self.fields['author'].rel.to)

    def test_assert_event_author_should_be_a_ForeignKey(self):
        self.assertIsInstance(self.fields['author'], ForeignKey)

    def test_assert_event_author_should_be_required(self):
        self.assertEquals(False, self.fields['author'].null)
        self.assertEquals(False, self.fields['author'].blank)

    def test_assert_event_author_should_have_a_related_name(self):
        self.assertEquals('events', self.fields['author'].rel.related_name)

    def test_assert_event_should_have_a_publish_flag(self):
        self.assertIn('is_published', Event._meta.get_all_field_names())

    def test_assert_event_is_published_should_be_a_BooleanField(self):
        self.assertIsInstance(self.fields['is_published'], BooleanField)

    def test_assert_event_is_published_should_be_True_as_default(self):
        self.assertEquals(True, self.fields['is_published'].default)

    def test_assert_event_should_have_a_jury(self):
        self.assertIn('jury', Event._meta.get_all_field_names())

    def test_assert_event_jury_should_be_an_Jury(self):
        self.assertEquals(Jury, self.fields['jury'].rel.to)

    def test_assert_event_jury_should_be_a_ForeignKey(self):
        self.assertIsInstance(self.fields['jury'], ForeignKey)

    def test_assert_event_jury_should_not_be_required(self):
        # Jury is optional: an event can exist before a jury is assigned.
        self.assertEquals(True, self.fields['jury'].null)
        self.assertEquals(True, self.fields['jury'].blank)

    def test_assert_event_jury_should_have_a_related_name(self):
        self.assertEquals('event', self.fields['jury'].rel.related_name)
class EventObjectTest(TestCase):
    """In-memory behavior of an Event built from EVENT_DATA (never saved)."""

    def setUp(self):
        self.event = Event(**EVENT_DATA)

    def test_assert_event_unicode_representation(self):
        self.assertEquals(unicode(self.event), u'RuPy')

    def test_assert_event_title(self):
        self.assertEquals(self.event.title, u'RuPy')

    def test_assert_event_description(self):
        self.assertEquals(self.event.description, u'A really good event.')

    def test_assert_event_author(self):
        self.assertEquals(self.event.author_id, 1)

    def test_assert_event_allow_public_voting(self):
        # Not part of EVENT_DATA, so this checks the model's default.
        self.assertEquals(self.event.allow_public_voting, True)

    def test_assert_event_is_published(self):
        self.assertEquals(self.event.is_published, False)
class ProposalModelIntegrityTest(TestCase):
    """Schema-level checks on the Proposal model: field types, constraints,
    defaults and FK relations.

    NOTE(review): uses pre-Django-1.10 APIs (`_meta.get_all_field_names()`,
    `field.rel`) -- this module targets an old Django version.
    """

    def setUp(self):
        # Map field name -> field instance for direct attribute checks.
        self.fields = {
            field.name: field for field in Proposal._meta.fields
        }

    def test_assert_proposal_should_have_a_verbose_name(self):
        self.assertEquals(_('Proposal'), Proposal._meta.verbose_name)

    def test_assert_proposal_should_have_a_verbose_name_plural(self):
        self.assertEquals(_('Proposals'), Proposal._meta.verbose_name_plural)

    def test_assert_proposal_should_have_a_title(self):
        self.assertIn('title', Proposal._meta.get_all_field_names())

    def test_assert_proposal_title_should_be_a_CharField(self):
        self.assertIsInstance(self.fields['title'], CharField)

    def test_assert_proposal_title_should_be_required(self):
        self.assertEquals(False, self.fields['title'].null)
        self.assertEquals(False, self.fields['title'].blank)

    def test_assert_proposal_title_should_have_at_most_200_characters(self):
        self.assertEquals(200, self.fields['title'].max_length)

    def test_assert_proposal_should_have_a_description(self):
        self.assertIn('description', Proposal._meta.get_all_field_names())

    def test_assert_proposal_description_should_be_a_TextField(self):
        self.assertIsInstance(self.fields['description'], TextField)

    def test_assert_proposal_description_should_be_required(self):
        self.assertEquals(False, self.fields['description'].null)
        self.assertEquals(False, self.fields['description'].blank)

    def test_assert_proposal_description_should_have_10000_characters(self):
        self.assertEquals(10000, self.fields['description'].max_length)

    def test_assert_proposal_should_have_a_author(self):
        self.assertIn('author', Proposal._meta.get_all_field_names())

    def test_assert_proposal_author_should_be_an_User(self):
        self.assertEquals(User, self.fields['author'].rel.to)

    def test_assert_proposal_author_should_be_a_ForeignKey(self):
        self.assertIsInstance(self.fields['author'], ForeignKey)

    def test_assert_proposal_author_should_be_required(self):
        self.assertEquals(False, self.fields['author'].null)
        self.assertEquals(False, self.fields['author'].blank)

    def test_assert_proposal_event_should_have_a_related_name(self):
        self.assertEquals('proposals', self.fields['event'].rel.related_name)

    def test_assert_proposal_should_have_a_event(self):
        self.assertIn('event', Proposal._meta.get_all_field_names())

    def test_assert_proposal_event_should_be_an_Event(self):
        self.assertEquals(Event, self.fields['event'].rel.to)

    def test_assert_proposal_event_should_be_a_ForeignKey(self):
        self.assertIsInstance(self.fields['event'], ForeignKey)

    def test_assert_proposal_event_should_be_required(self):
        self.assertEquals(False, self.fields['event'].null)
        self.assertEquals(False, self.fields['event'].blank)

    def test_assert_proposal_should_have_a_publish_flag(self):
        self.assertIn('is_published', Proposal._meta.get_all_field_names())

    def test_assert_proposal_is_published_should_be_a_BooleanField(self):
        self.assertIsInstance(self.fields['is_published'], BooleanField)

    def test_assert_proposal_is_published_should_be_True_as_default(self):
        self.assertEquals(True, self.fields['is_published'].default)

    def test_assert_proposal_is_approved_should_be_a_BooleanField(self):
        self.assertIsInstance(self.fields['is_approved'], BooleanField)

    def test_assert_proposal_is_approved_should_be_False_as_default(self):
        self.assertEquals(False, self.fields['is_approved'].default)
class ProposalObjectTest(TestCase):
    """Behavioral tests on Proposal instances, including the voting and
    approve/disapprove workflows (some tests hit the database).

    NOTE(review): `User.objects.get(id=2)` implies the `user.json` fixture
    contains at least two users -- confirm against the fixture.
    """

    # Loads the users referenced by author_id/user_id in the data dicts.
    fixtures = ['user.json']

    def setUp(self):
        self.user = User.objects.first()
        self.event = Event(**EVENT_DATA)
        self.proposal = Proposal(**PROPOSAL_DATA)
        self.vote = Vote(user_id=self.event.author_id,
                         proposal=self.proposal, rate=3)

    def test_assert_proposal_unicode_representation(self):
        self.assertEquals(u'Python For Zombies', unicode(self.proposal))

    def test_assert_proposal_title(self):
        self.assertEquals(u'Python For Zombies', self.proposal.title)

    def test_assert_proposal_description(self):
        self.assertEquals(u'Brain...', self.proposal.description)

    def test_assert_proposal_author(self):
        self.assertEquals(1, self.proposal.author_id)

    def test_assert_proposal_rate(self):
        # No votes saved yet, so the aggregate rate is zero.
        self.assertEquals(0, self.proposal.get_rate)

    def test_assert_user_cannot_vote_multiple_times(self):
        # Persist event/proposal/vote, with the proposal authored by a
        # *different* user so self.user's vote is legitimate.
        self.event.save()
        self.proposal.event = self.event
        self.proposal.author = User.objects.get(id=2)
        self.proposal.save()
        self.vote.proposal = self.proposal
        self.vote.save()
        self.assertTrue(self.proposal.user_already_voted(self.user))

    def test_assert_proposal_is_published(self):
        self.assertEquals(True, self.proposal.is_published)

    def test_assert_proposal_approve(self):
        self.event.save()
        self.proposal.event = self.event
        self.proposal.save()
        self.assertEquals(False, self.proposal.is_approved)
        self.proposal.approve()
        self.assertEquals(True, self.proposal.is_approved)

    def test_assert_proposal_disapprove(self):
        self.event.save()
        self.proposal.event = self.event
        self.proposal.is_approved = True
        self.proposal.save()
        self.assertEquals(True, self.proposal.is_approved)
        self.proposal.disapprove()
        self.assertEquals(False, self.proposal.is_approved)
class VoteModelIntegrityTest(TestCase):
    """Schema-level checks on the Vote model: field types, constraints and
    the (proposal, user) uniqueness rule.

    NOTE(review): uses pre-Django-1.10 APIs (`_meta.get_all_field_names()`,
    `field.rel`) -- this module targets an old Django version.
    """

    def setUp(self):
        # Map field name -> field instance for direct attribute checks.
        self.fields = {
            field.name: field for field in Vote._meta.fields
        }

    def test_assert_vote_should_have_a_verbose_name(self):
        self.assertEquals(_('Vote'), Vote._meta.verbose_name)

    def test_assert_vote_should_have_a_verbose_name_plural(self):
        self.assertEquals(_('Votes'), Vote._meta.verbose_name_plural)

    def test_assert_vote_should_have_a_unique_together_constraint(self):
        # One vote per user per proposal.
        self.assertEquals((('proposal', 'user'),), Vote._meta.unique_together)

    def test_assert_vote_should_have_a_rate(self):
        self.assertIn('rate', Vote._meta.get_all_field_names())

    def test_assert_vote_rate_should_be_a_SmallIntegerField(self):
        self.assertIsInstance(self.fields['rate'], SmallIntegerField)

    def test_assert_vote_rate_should_be_required(self):
        # NOTE(review): despite the test name, this asserts rate is
        # *optional* (null=True, blank=True) -- name and body disagree.
        self.assertEquals(True, self.fields['rate'].null)
        self.assertEquals(True, self.fields['rate'].blank)

    def test_assert_vote_should_have_a_proposal(self):
        self.assertIn('proposal', Vote._meta.get_all_field_names())

    def test_assert_vote_proposal_should_be_an_Proposal(self):
        self.assertEquals(Proposal, self.fields['proposal'].rel.to)

    def test_assert_vote_proposal_should_be_a_ForeignKey(self):
        self.assertIsInstance(self.fields['proposal'], ForeignKey)

    def test_assert_vote_proposal_should_be_required(self):
        self.assertEquals(False, self.fields['proposal'].null)
        self.assertEquals(False, self.fields['proposal'].blank)

    def test_assert_vote_proposal_should_have_a_related_name(self):
        self.assertEquals('votes', self.fields['proposal'].rel.related_name)

    def test_assert_vote_should_have_a_author(self):
        self.assertIn('user', Vote._meta.get_all_field_names())

    def test_assert_vote_user_should_be_an_User(self):
        self.assertEquals(User, self.fields['user'].rel.to)

    def test_assert_vote_user_should_be_a_ForeignKey(self):
        self.assertIsInstance(self.fields['user'], ForeignKey)

    def test_assert_vote_user_should_be_required(self):
        self.assertEquals(False, self.fields['user'].null)
        self.assertEquals(False, self.fields['user'].blank)

    def test_assert_vote_event_should_have_a_related_name(self):
        self.assertEquals('votes', self.fields['user'].rel.related_name)
class VoteObjectTest(TestCase):
    """In-memory behavior of a Vote tied to an unsaved Event/Proposal pair."""

    def setUp(self):
        self.event = Event(**EVENT_DATA)
        self.proposal = Proposal(event=self.event, **PROPOSAL_DATA)
        self.vote = Vote(user_id=self.event.author_id,
                         proposal=self.proposal, rate=3)

    def test_assert_vote_unicode_representation(self):
        self.vote.user = User(username='User')
        self.assertEquals(unicode(self.vote), u'User: 3 in Python For Zombies')

    def test_assert_vote_rate(self):
        self.assertEquals(self.vote.rate, 3)

    def test_assert_vote_proposal(self):
        self.assertEquals(self.vote.proposal, self.proposal)

    def test_assert_vote_author(self):
        self.assertEquals(self.vote.user_id, 1)
| {
"content_hash": "7c89e3591b40ae40a767d62d5ce6ae00",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 81,
"avg_line_length": 39.25966850828729,
"alnum_prop": 0.687799043062201,
"repo_name": "felipevolpone/speakerfight",
"id": "eb067c8b2fe46d0a9b013a62d7aaacd0c5a47fef",
"size": "14212",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deck/tests/test_unit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51963"
},
{
"name": "HTML",
"bytes": "52228"
},
{
"name": "JavaScript",
"bytes": "10877"
},
{
"name": "Python",
"bytes": "112200"
}
],
"symlink_target": ""
} |
"""
The bld lib provides build helper tools.
"""
#
# Package's version
#
try:
from ._version import version as __version__
except ImportError:
# broken installation, we don't even try
__version__ = "unknown"
| {
"content_hash": "db074f994150870823f11dc7f83a2a27",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 48,
"avg_line_length": 18.416666666666668,
"alnum_prop": 0.6606334841628959,
"repo_name": "osechet/bld",
"id": "a600941be74112eeeafe7f827e3c31242572f8fb",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bldlib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48388"
}
],
"symlink_target": ""
} |
"""Completion evaluation code for JavaScript"""
import logging
import types
import re
from pprint import pformat
from itertools import chain
from codeintel2.common import *
from codeintel2.util import indent
from codeintel2.tree import TreeEvaluator
class CandidatesForTreeEvaluator(TreeEvaluator):
    """Shared helpers for citdl-expression evaluation: scoperef-to-element
    resolution and citdl expression tokenization.
    """
    # Note: the "alt" changes added in change 281350 make some of the
    # functionality on this class *not* appropriate for the shared
    # TreeEvaluator. I.e. _elem_from_scoperef et al should be moved
    # *out* of CandidatesForTreeEvaluator.

    # This is a dict when set, multiple elements that have the same lpath will
    # be set in here, ensuring we get the correct one from an lpath lookup.
    # Fixes the following bug:
    # http://bugs.activestate.com/show_bug.cgi?id=71666
    # Ideally, this would not be needed once elem.names[] can return a tuple,
    # see the following bug for reference:
    # http://bugs.activestate.com/show_bug.cgi?id=71941
    _alt_elem_from_scoperef = None

    def _elem_from_scoperef(self, scoperef):
        """A scoperef is (<blob>, <lpath>). Return the actual elem in
        the <blob> ciElementTree being referred to.

        Raises KeyError if a name along <lpath> cannot be resolved.
        """
        elem = scoperef[0]
        i = 0
        for lname in scoperef[1]:
            i += 1
            if self._alt_elem_from_scoperef is not None:
                # Prefer a disambiguated element registered for this dotted
                # prefix (see class comment about duplicate lpaths).
                scoperef_names = ".".join(scoperef[1][:i])
                alt_elem = self._alt_elem_from_scoperef.get(scoperef_names)
                if alt_elem is not None:
                    elem = alt_elem
                    continue
            elem = elem.names[lname]
        return elem

    def _tokenize_citdl_expr(self, expr):
        """Generate the tokens of citdl expression `expr`.

        Splits on ".", yields the contents of quoted strings as bare
        tokens, and collapses bracket pairs: "()" and "[]" are yielded
        literally, while a quoted subscript such as ['name'] is yielded
        as its string contents so it behaves like attribute access.
        """
        # Pair each character with its successor (None at end-of-string).
        chars = iter(zip(expr, chain(expr[1:], (None,))))
        buffer = []

        def get_pending_token():
            # Flush the accumulated plain-identifier characters, if any.
            if buffer:
                yield "".join(buffer)
                del buffer[:]

        def get_quoted_string(ch):
            # Consume characters up to the matching close quote, honoring
            # backslash escapes; yields the string contents (if non-empty).
            quote = ch
            local_buffer = []
            for ch, next_ in chars:
                # print "quote: quote=[%s] ch=[%s] next=[%s] token=%r" % (
                #    quote, ch, next_, local_buffer)
                if ch == "\\":
                    local_buffer.append(next(chars)[0])
                elif ch == quote:
                    if local_buffer:
                        yield "".join(local_buffer)
                    break
                else:
                    local_buffer.append(ch)

        BLOCK_MAP = {"(": ")", "[": "]"}
        for ch, next_ in chars:
            # print "ch=[%s] next=[%s] token=%r" % (ch, next_, buffer)
            if ch in ('"', "'"):  # quoted string
                for token in get_pending_token():
                    yield token
                for token in get_quoted_string(ch):
                    yield token
            elif ch == ".":
                for token in get_pending_token():
                    yield token
                buffer = []
            elif ch in BLOCK_MAP:
                block = [ch, BLOCK_MAP[ch]]
                # Only subscripts ("[") may emit their quoted contents as a
                # token; call parens always collapse to "()".
                emit = ch in ("[",)
                for token in get_pending_token():
                    yield token
                if next_ == block[1]:
                    next(chars)  # consume close quote
                    yield block[0] + block[1]
                elif next_ in ('"', "'"):  # quoted string
                    next(chars)  # consume open bracket
                    next_tokens = list(get_quoted_string(next_))
                    ch, next_ = next(chars)
                    if ch == block[1] and emit:
                        for next_token in next_tokens:
                            yield next_token
                    else:
                        yield block[0] + block[1]
                else:
                    buffer.append(ch)
        if buffer:
            yield "".join(buffer)

    def _join_citdl_expr(self, tokens):
        """Re-join tokens into a citdl expression; "a.()" becomes "a()"."""
        return '.'.join(tokens).replace('.()', '()')
class JavaScriptTreeEvaluator(CandidatesForTreeEvaluator):
def eval_cplns(self):
self.log_start()
start_scoperef = self.get_start_scoperef()
self.info("start scope is %r", start_scoperef)
if self.trg.type == "names":
cplns = list(self._completion_names_from_scope(self.expr,
start_scoperef))
else:
hits = self._hits_from_citdl(self.expr, start_scoperef)
cplns = list(self._members_from_hits(hits))
if not cplns:
raise CodeIntelError("No completions found")
# For logging messages every call
# print indent('\n'.join("%s: %s" % (lvl, args and m % (args) or m)
# for lvl,m, args in self.ctlr.log))
# print indent('\n'.join(["Hit: %r" % (cpln, ) for cpln in cplns]))
return cplns
def eval_calltips(self):
self.log_start()
start_scoperef = self.get_start_scoperef()
self.info("start scope is %r", start_scoperef)
hits = self._hits_from_citdl(self.expr, start_scoperef)
if not hits:
raise CodeIntelError("No calltips found")
return self._calltips_from_hits(hits)
def eval_defns(self):
self.log_start()
start_scoperef = self.get_start_scoperef()
self.info("start scope is %r", start_scoperef)
hits = self._hits_from_citdl(self.expr, start_scoperef, defn_only=True)
if not hits:
raise CodeIntelError("No definitions found")
return [self._defn_from_hit(x) for x in hits]
    def parent_scoperef_from_scoperef(self, scoperef,
                                      started_in_builtin_window_scope=False):
        """Return the parent scoperef of `scoperef`, or None at the top.

        For JavaScript-in-the-browser the top-level scope is the
        Window object instance. For now we are always presuming we
        are running in the browser if the language is JavaScript.

        Problem: if we *started* on the Window class then the parent
        scope should be -> built-in-blob. This is what
        'started_in_builtin_window_scope' is used for.
        """
        blob, lpath = scoperef
        global_var = self._global_var
        # At the built-in global scope itself: no parent (unless the walk
        # started there, in which case the earlier branch is skipped).
        if not started_in_builtin_window_scope \
           and lpath == [global_var] and blob is self.built_in_blob:
            return None
        elif lpath:
            # Pop one level off the scope path.
            return (blob, lpath[:-1])
        elif blob is self.built_in_blob:
            if started_in_builtin_window_scope:
                return None
            elif global_var is not None:
                return (self.built_in_blob, [global_var])
            else:
                return (self.built_in_blob, [])
        # NOTE(review): a non-built-in blob with an empty lpath falls off
        # the end here and implicitly returns None -- confirm intentional.
@property
def _global_var(self):
"""
The type of the global variable
"""
if self.trg.lang == "Node.js":
return "global"
return "Window"
_langintel = None
@property
def langintel(self):
if self._langintel is None:
self._langintel = self.mgr.langintel_from_lang(self.trg.lang)
return self._langintel
_libs = None
@property
def libs(self):
if self._libs is None:
self._libs = self.langintel.libs_from_buf(self.buf)
return self._libs
@property
def stdlib(self):
# JS stdlib is always the last one.
return self.libs[-1]
_built_in_blob = None
@property
def built_in_blob(self):
if self._built_in_blob is None:
self._built_in_blob = self.stdlib.get_blob("*")
return self._built_in_blob
## Specific element completions ##
    def _hit_from_first_token(self, token, scoperef):
        """Find the token at the given or a parent scope.

        Returns the found elem and the scope at which it was found. If
        not found, this returns (None, None).
        """
        self.log("find '%s' starting at %s", token, scoperef)

        # Because we fake JavaScript classes and put the ctor
        # function inside the class, we need to push start scopes at
        # the class to the ctor. See test
        # javascript/cpln/ctor_scope_cheat for an example of why.
        try:
            elem = self._elem_from_scoperef(scoperef)
        except KeyError as ex:
            self.warn("_hit_from_first_token:: no elem for scoperef: %r",
                      scoperef)
            return (None, None)
        if elem.get("ilk") == "class":
            class_name = elem.get("name")
            try:
                ctor = elem.names[class_name]
            except KeyError:
                pass
            else:
                if "__ctor__" in ctor.get("attributes", ""):
                    scoperef = (scoperef[0], scoperef[1]+[class_name])
                    self.log("push scope to class ctor %s", scoperef)

        # Remember whether the walk began at the built-in global scope;
        # parent_scoperef_from_scoperef treats that case specially.
        started_in_builtin_window_scope = (scoperef[0] is self.built_in_blob
                                           and scoperef[1] and scoperef[1][0] == self._global_var)
        # Walk outward through enclosing scopes until the token resolves
        # or the scope chain is exhausted.
        while 1:
            try:
                elem = self._elem_from_scoperef(scoperef)
            except KeyError as ex:
                raise EvalError("could not resolve scoperef %r: %s"
                                % (scoperef, ex))
            try:
                candidate = elem.names[token]
                if "__ctor__" in candidate.get("attributes", ""):
                    # In JavaScript we include the constructor
                    # function for a (faked) class as a method.
                    # We must skip it here or resolution of 'this'
                    # in a JS class methods will always hit the ctor
                    # instead of the class (which is by far the
                    # common case).
                    raise KeyError("skipping JavaScript ctor")
                self.log("is '%s' accessible on %s? yes", token, scoperef)
                return candidate, scoperef
            except KeyError:
                self.log("is '%s' accessible on %s? no", token, scoperef)
                scoperef = self.parent_scoperef_from_scoperef(scoperef,
                                                              started_in_builtin_window_scope)
                if not scoperef:
                    return None, None
    def _members_from_hits(self, hits):
        """Return the set of completion members for the given hits.

        @param hits {list} of (elem, scoperef) tuples.
        @returns {set} of (kind, name) 2-tuples, where kind is the
            child's ilk ("function", "class", ...), "namespace" for a
            variable with children, or "variable".
        """
        members = set()
        curr_blob = self.buf.blob_from_lang.get(self.lang, None)
        for elem, scope in hits:
            # In JavaScript we include the constructor function for a
            # (faked) class as a method. Completion on an instance of
            # this class shouldn't see the ctor.
            skip_js_ctor = (elem.tag == "scope" and elem.get("ilk") == "class")
            if elem.get("ilk") == "function":
                # Functions have an implicit citdl type of "Function". See bug:
                # http://bugs.activestate.com/show_bug.cgi?id=76504
                try:
                    subhits = self._hits_from_type_inference("Function", scope)
                    members.update(self._members_from_hits(subhits))
                except CodeIntelError:
                    pass  # Ignore if Function was not found
            for child in elem:
                if elem.get("ilk") == "function" and child.get("ilk") == "argument":
                    # function arguments are not members, skip them.
                    # (we might still find properties of functions, though)
                    continue
                # Only add locals when the current scope is the same
                # as the variable scope.
                attributes = child.get("attributes", "").split()
                if curr_blob is not None and scope[0] != curr_blob:
                    # The hit lives in another blob: its file-local
                    # symbols must not leak into our completions.
                    if "__file_local__" in attributes:
                        self.log("skipping file_local %r in %r", elem, scope)
                        continue
                if "__local__" in attributes:
                    # XXX: Move start_scoperef to be a part of the class
                    # start_scoperef = self.get_start_scoperef()
                    # scope_elem = start_scoperef[0]
                    # for lname in start_scoperef[1]:
                    #     if elem == scope_elem:
                    #         members.add( ("variable", child.get("name")) )
                    #         break
                    #     scope_elem = scope_elem.names[lname]
                    # else: # Don't show this variable
                    continue
                if child.tag == "scope":
                    if skip_js_ctor and child.get("ilk") == "function" \
                       and "__ctor__" in attributes:
                        # Hide the faked-class ctor (see note above).
                        continue
                    members.add((child.get("ilk"), child.get("name")))
                elif child.tag == "variable":
                    # A variable with children acts as a namespace.
                    if len(child):
                        members.add(("namespace", child.get("name")))
                    else:
                        members.add(("variable", child.get("name")))
                else:
                    raise NotImplementedError("unknown hit child tag '%s': %r"
                                              % (child.tag, child))
            # Include members inherited from base classes, if any.
            for classref in elem.get("classrefs", "").split():
                try:
                    subhits = self._hits_from_type_inference(classref, scope)
                    members.update(self._members_from_hits(subhits))
                except CodeIntelError:
                    pass  # Ignore when parent class not found, bug 65447
        return members
def _calltip_from_func(self, elem):
# See "Determining a Function CallTip" in the spec for a
# discussion of this algorithm.
signature = elem.get("signature")
doc = elem.get("doc")
ctlines = []
if not signature:
name = elem.get("name")
# XXX Note difference for Tcl in _getSymbolCallTips.
ctlines = [name + "(...)"]
else:
ctlines = signature.splitlines(0)
if doc:
ctlines += doc.splitlines(0)
return '\n'.join(ctlines)
def _calltip_from_class(self, elem):
# If the class has a defined signature then use that.
name = elem.get("name")
signature = elem.get("signature")
doc = elem.get("doc")
if signature:
ctlines = signature.splitlines(0)
if doc:
ctlines += doc.splitlines(0)
return '\n'.join(ctlines)
elif name in elem.names:
# Typically the class element has a contructor function of
# the same name as the class.
ctor = elem.names[name]
self.log("ctor is %r", ctor)
return self._calltip_from_func(ctor)
else:
ctlines = [name + "(...)"]
if doc:
ctlines += doc.splitlines(0)
return '\n'.join(ctlines)
def _calltips_from_hits(self, hits):
"""
c.f. CitadelEvaluator._getSymbolCallTips()
"""
calltips = []
for elem, scoperef in hits:
# self.log("calltip for hit: %r", hit)
if elem.tag == "variable":
# Ignore variable hits.
self.debug("_calltips_from_hits:: ignoring variable: %r", elem)
continue
elif elem.tag == "scope":
ilk = elem.get("ilk")
if ilk == "function":
calltips.append(self._calltip_from_func(elem))
elif ilk == "class":
calltips.append(self._calltip_from_class(elem))
else:
raise NotImplementedError("unexpected scope ilk for "
"calltip hit: %r" % elem)
else:
raise NotImplementedError("unexpected elem for calltip "
"hit: %r" % elem)
## Bug 59438: adding "(from $lpath in $file)" when helpful
## in calltips.
## TODO: Don't include all (or part) when not useful:
## document.getElementsByClassName -> "(from document in
## prototype)". The "document in" in not necessary.
## TODO: Bad with empty lpath: "(from in prototype)"
## TODO: Problematic for test suite with "rand??" module names.
## TODO: Don't add for a local hit.
# blobname = scoperef[0].get("name")
# if blobname == "*":
# blobname = "stdlib"
# scopename = '.'.join(scoperef[1])
# calltips[-1] += "\n(from %s in %s)" % (scopename, blobname)
return calltips
def _hits_from_citdl(self, expr, scoperef, defn_only=False):
with self._check_infinite_recursion(expr):
if "[]" in expr:
# TODO: We cannot resolve array type inferences yet.
# Note that we do allow arrays types with a string key, since
# that's an alternative form for property access
raise CodeIntelError(
"no type-inference yet for arrays: %r" % expr)
tokens = list(self._tokenize_citdl_expr(expr))
# self.log("expr tokens: %r", tokens)
# First part... we try to match as much as possible straight up
hits, nconsumed = self._hits_from_first_part(tokens, scoperef)
if not hits:
raise CodeIntelError(
"could not resolve first part of '%s'" % expr)
self.debug("_hits_from_citdl: first part: %r -> %r",
tokens[:nconsumed], hits)
# ...the remainder.
remaining_tokens = tokens[nconsumed:]
for token in tokens[nconsumed:]:
new_hits = []
for elem, scoperef in hits:
self.debug("_hits_from_citdl: resolve %r on %r in %r",
token, elem, scoperef)
if token == "()":
try:
new_hits += self._hits_from_call(elem, scoperef)
except CodeIntelError as ex:
self.warn("could resolve call on %r: %s", elem, ex)
continue
try:
new_hit = self._hit_from_getattr(
elem, scoperef, token)
except CodeIntelError as ex:
if token == "prototype" and elem.get("ilk") == "class":
self.debug("_hits_from_citdl: using class %r for "
"its prototype", elem)
new_hits.append((elem, scoperef))
else:
self.warn(str(ex))
else:
new_hits.append(new_hit)
hits = new_hits
# Resolve any variable type inferences.
# XXX Don't we have to *recursively* resolve hits?
# If that is done, then need to watch out for infinite loop
# because _hits_from_variable_type_inference() for a variable
# with children just returns itself. I.e. you *can't* resolve
# the <variable> away.
resolved_hits = []
if self.buf:
curr_blob = self.buf.blob_from_lang.get(self.lang, {})
else:
curr_blob = None
for elem, scoperef in hits:
if scoperef[0] != curr_blob:
if "__file_local__" in elem.get("attributes", "").split():
self.log(
"skipping __file_local__ %r in %r", elem, scoperef)
continue
if elem.tag == "variable" and not defn_only:
try:
if (not elem.get("citdl")) and elem.get("ilk") == "argument":
# this is an argument, try to infer things from the
# caller
subhits = self._hits_from_argument(elem, scoperef)
else:
subhits = self._hits_from_variable_type_inference(
elem, scoperef)
except CodeIntelError as ex:
self.warn("could not resolve %r: %s", elem, ex)
else:
resolved_hits += subhits
else:
resolved_hits.append((elem, scoperef))
return resolved_hits
    def _hits_from_argument(self, elem, scoperef):
        """
        Return hits for an argument of a function based on its caller
        @param elem The argument; must have ilk=argument
        @param scoperef The scope containing the element
        @returns list of hits
        """
        assert elem.get("ilk") == "argument", \
            "_hits_from_argument expects an argument, got a %r" % elem.get(
                "ilk")
        hits = []
        scope = self._elem_from_scoperef(
            scoperef)  # the function the argument is in

        # Find this argument's positional index in the enclosing
        # function's parameter list.
        args = [arg for arg in scope.findall(
            "variable") if arg.get("ilk") == "argument"]
        for pos in range(len(args)):
            if args[pos].get("name") == elem.get("name"):
                break
        else:
            # can't find the argument?
            return []

        # For each recorded call site ("caller") of the enclosing
        # function, resolve the expression passed at our position.
        # NOTE(review): getiterator() was removed from the stdlib
        # ElementTree in Python 3.9; presumably this CIX element type
        # still provides it -- confirm against the tree implementation.
        for caller in scope.getiterator("caller"):
            citdl = caller.get("citdl")
            caller_pos = int(caller.get("pos") or 0)  # 1-indexed
            if citdl is None or caller_pos < 1:
                # invalid caller
                continue
            for caller_hit in self._hits_from_citdl(citdl, scoperef):
                caller_func = caller_hit[0]  # the calling function
                if caller_func.get("ilk") != "function":
                    # nevermind, not a function
                    continue
                caller_args = [arg for arg in caller_func.getiterator(
                    "variable") if arg.get("ilk") == "argument"]
                if caller_pos > len(caller_args):
                    # no such argument
                    continue
                caller_arg = caller_args[caller_pos - 1]
                citdl = caller_arg.get("citdl")
                if not citdl:
                    continue
                # Resolve the caller argument's type, then map back to
                # our own parameter position in the resolved function.
                for citdl_hit in self._hits_from_citdl(citdl, caller_hit[1]):
                    # got the function being called, now look up the argument
                    # by pos
                    func = citdl_hit[0]
                    if func.get("ilk") != "function":
                        continue
                    args = [arg for arg in func.getiterator(
                        "variable") if arg.get("ilk") == "argument"]
                    if pos >= len(args):
                        continue
                    citdl = args[pos].get("citdl")
                    if not citdl:
                        continue
                    hits += self._hits_from_citdl(citdl, citdl_hit[1])
        return hits
def _hits_from_call(self, elem, scoperef):
"""Resolve the function call inference for 'elem' at 'scoperef'."""
if elem.tag == "variable":
hits = []
var_hits = self._hits_from_variable_type_inference(elem, scoperef)
for var_elem, var_scoperef in var_hits:
if var_elem != elem:
try:
hits += self._hits_from_call(var_elem, var_scoperef)
except CodeIntelError:
pass # Keep trying other alternatives
if not hits:
raise CodeIntelError("could not resolve call on %r." % elem)
return hits
if elem.get("ilk") == "class":
return [(elem, scoperef)]
if elem.get("ilk") != "function":
raise CodeIntelError("_hits_from_call:: unexpected element type %r"
% elem)
# CommonJS / NodeJS hack
if elem.get("name") == "require" and \
scoperef[0] is self.built_in_blob and \
not scoperef[1]:
try:
requirename = self.trg.extra.get("_params", []).pop(0)
except IndexError:
requirename = None
if requirename is not None:
import codeintel2.lang_javascript
requirename = codeintel2.lang_javascript.Utils.unquoteJsString(
requirename)
self.log("_hits_from_call: resolving CommonJS require(%r)",
requirename)
hits = self._hits_from_commonjs_require(requirename, scoperef)
if len(hits) > 0:
return hits
resolver = getattr(elem, "resolve", None)
try:
param = self.trg.extra.get("_params", []).pop(0)
except IndexError:
param = None
if resolver and param is not None:
try:
self.log("Attempting to use extra resolver %r param %r",
resolver, param)
hits = resolver(evlr=self, action="call", scoperef=scoperef,
param=param)
if hits:
return hits
except:
self.log("Extra resolver %r: Failed to resolve %s",
resolver, scoperef)
else:
self.log("_hits_from_call: no resolver on %r", elem)
citdl = elem.get("returns")
if not citdl:
raise CodeIntelError("no return type info for %r" % elem)
self.log("_hits_from_call: resolve '%s' for %r, scoperef: %r",
citdl, elem, scoperef)
# scoperef has to be set to the function called
scoperef = (scoperef[0], scoperef[1]+[elem.get("name")])
return self._hits_from_citdl(citdl, scoperef)
    def _hit_from_getattr(self, elem, scoperef, token):
        """Resolve the getattr of 'token' on the given 'elem'.

        Raises CodeIntelError if could not resolve it.

        Algorithm:
        - Try to resolve it.
        - Call a hook to make an educated guess. Some attribute names
          are strong signals as to the object type -- typically those
          for common built-in classes.
        """
        self.log("resolve getattr '%s' on %r in %r:", token, elem, scoperef)
        # Normalize the starting element down to a list of scope hits.
        if elem.tag == "variable":
            hits = self._hits_from_variable_type_inference(elem, scoperef)
        elif elem.tag == "scope" and elem.get("ilk") == "function":
            # Functions have an implicit citdl type of "Function". Bug 80880.
            hits = self._hits_from_type_inference("Function", scoperef)
        else:
            assert elem.tag == "scope", "elem tag is not 'scope': %r" % elem.tag
            hits = [(elem, scoperef)]

        for hit_elem, hit_scoperef in hits:
            self.log("_hit_from_getattr:: hit elem %r, scoperef: %r",
                     hit_elem, hit_scoperef)
            ilk = hit_elem.get("ilk")
            if hit_elem.tag == "variable":
                # Direct child lookup on a namespace-like variable.
                attr = hit_elem.names.get(token)
                if attr is not None:
                    self.log("attr is %r on %r", attr, hit_elem)
                    var_scoperef = (hit_scoperef[0],
                                    hit_scoperef[1]+[hit_elem.get("name")])
                    return (attr, var_scoperef)
            elif ilk == "function":
                # Recurse: the function resolves through "Function" above.
                return self._hit_from_getattr(hit_elem, hit_scoperef, token)
            elif ilk == "class":
                attr = hit_elem.names.get(token)
                if attr is not None:
                    self.log("attr is %r on %r", attr, hit_elem)
                    if hit_scoperef:
                        class_scoperef = (hit_scoperef[0],
                                          hit_scoperef[1]+[hit_elem.get("name")])
                        # If this is a variable defined in a class, move the
                        # scope to become the position in the class where the
                        # variable was defined (usually the ctor class function)
                        # this ensures we get the right citdl lookup. See bug:
                        # http://bugs.activestate.com/show_bug.cgi?id=71343
                        lineno = int(attr.get("line", "-1"))
                        if attr.tag == "variable" and \
                           lineno > int(hit_elem.get("line", "-1")) and \
                           lineno <= int(hit_elem.get("lineend", "-1")):
                            # get the scope of the variable
                            blob, lpath = self.buf.scoperef_from_blob_and_line(
                                hit_elem,
                                lineno)
                            if lpath:
                                class_scoperef = (class_scoperef[0],
                                                  class_scoperef[1]+lpath)
                                self.log(
                                    "Updating scoperef to: %r", class_scoperef)
                    else:
                        class_scoperef = (None, [hit_elem.get("name")])
                    return (attr, class_scoperef)
                # Not found directly on the class: walk its base classes.
                for classref in hit_elem.get("classrefs", "").split():
                    try:
                        base_hits = self._hits_from_type_inference(classref,
                                                                   hit_scoperef)
                    except CodeIntelError:
                        pass  # Ignore when parent class not found, bug 65447
                    else:
                        for base_elem, base_scoperef in base_hits:
                            if token in base_elem.names:
                                self.log("is '%s' from %s base class? yes",
                                         token, base_elem)
                                new_scoperef = (base_scoperef[0],
                                                base_scoperef[1] +
                                                [base_elem.get("name")])
                                return (base_elem.names[token], new_scoperef)
                            self.log("is '%s' from %s base class? no", token,
                                     base_elem)
            else:
                raise NotImplementedError("unexpected scope ilk: %r" % ilk)
        raise CodeIntelError("could not resolve '%s' getattr on %r in %r"
                             % (token, elem, scoperef))
    def _hits_from_variable_type_inference(self, elem, scoperef):
        """Resolve the type inference for 'elem' at 'scoperef'.

        @param elem A <variable> CIX element.
        @param scoperef {tuple} (<blob>, <lpath>) where it was found.
        @returns {list} of (elem, scoperef) hits.
        @raises CodeIntelError when no inference can be made.
        """
        assert elem.tag == "variable"
        hits = []
        citdl = elem.get("citdl")
        if citdl == "require()":
            # Node.js / CommonJS hack: try to resolve things via require()
            requirename = elem.get('required_library_name')
            if requirename:
                self.log(
                    "_hits_from_variable_type_inference: resolving require(%r)",
                    requirename)
                hits += self._hits_from_commonjs_require(requirename, scoperef)
        if len(elem) != 0:
            # This is CIX for a JavaScript custom Object instance: a
            # common pattern in JS. See test javascript/cpln/local2.
            # remember to also return things from require()
            return hits + [(elem, scoperef)]
        if not citdl:
            raise CodeIntelError("no type-inference info for %r" % elem)
        self.log("resolve '%s' type inference for %r:", citdl, elem)

        if (citdl+".").startswith(elem.get("name")+".") and citdl not in elem.names:
            # The citdl expression is the same as the variable name, this will
            # create a recursive citdl lookup loop. What we likely want is a
            # different match that has the same name, so we go looking for it.
            # Fix for bug: http://bugs.activestate.com/show_bug.cgi?id=71666
            self.log("_hits_from_variable_type_inference:: recursive citdl "
                     " expression found, trying alternatives.")
            try:
                parent_elem = self._elem_from_scoperef(scoperef)
            except KeyError as ex:
                raise CodeIntelError(
                    "could not resolve recursive citdl expression %r" % citdl)
            else:
                alt_hits = []
                # Look for alternative non-variable matches.
                for child in parent_elem:
                    if child.tag != "variable" and child.get("name") == citdl:
                        alt_hits.append((child, scoperef))
                        # Remember the alternative hit, in case we need to
                        # look up this lpath again.
                        if self._alt_elem_from_scoperef is None:
                            self._alt_elem_from_scoperef = {}
                        alt_sref_name = ".".join(scoperef[1] + [citdl])
                        self._alt_elem_from_scoperef[alt_sref_name] = child
                        self.log(
                            "Alternative hit found: %r, scoperef: %r", child, scoperef, )
                if alt_hits:
                    return alt_hits
                # Try from the parent scoperef then.
                scoperef = self.parent_scoperef_from_scoperef(scoperef)
                if scoperef is None:
                    # When we run out of scope, raise an error
                    raise CodeIntelError(
                        "could not resolve recursive citdl expression %r" % citdl)
                # Continue looking using _hits_from_citdl with the parent.
                self.log(
                    "Continue search for %r from the parent scope.", citdl)

        try:
            hits += self._hits_from_citdl(citdl, scoperef)
        except EvalError:
            # shut up eval errors if we managed to get _some_ hits
            if not hits:
                raise
        return hits
def _hits_from_type_inference(self, citdl, scoperef):
"""Resolve the 'citdl' type inference at 'scoperef'."""
self.log("resolve '%s' type inference:", citdl)
return self._hits_from_citdl(citdl, scoperef)
    def _hits_from_first_part(self, tokens, scoperef):
        """Resolve the first part of the expression.

        If the first token is found at the global or built-in level (or
        not found at all locally) then it may be a shared namespace with
        other files in the execution set. Get that down to a list of
        hits and a remaining list of expression tokens.

        @returns (hits, nconsumed): the matching (elem, scoperef) hits
            and how many leading tokens they account for.
        """
        elem, scoperef = self._hit_from_first_token(tokens[0], scoperef)
        if elem is not None:
            self.log("_hit_from_first_part: found elem: %s %r at %r",
                     elem.get("ilk") or elem.tag, elem.get("name"),
                     scoperef[1])

        if (elem is None          # first token wasn't found
            or not scoperef[1]    # first token was found at global level
            # first token was found on built-in Window class (the top scope)
            or (scoperef[1] == ['Window'] and scoperef[0].get("name") == "*")
            ):
            # Search symbol table in execution set.
            #
            # Example: 'myPet.name.toLowerCase()' and 'myPet' is found
            # at top-level. First lookup 'myPet.name.toLowerCase'
            # (everything up to first '()'), in execution set, then
            # 'myPet.name', then 'myPet'. The last one should always hit
            # in current file, at least.
            for first_call_idx, token in enumerate(tokens):
                if token == "()":
                    break
            else:
                first_call_idx = len(tokens)

            hits = []
            # Try progressively shorter lookup paths until something hits.
            for nconsumed in range(first_call_idx, 0, -1):
                lpath = tuple(tokens[:nconsumed])  # for hits_from_lpath()
                if elem is not None and len(lpath) > 1:
                    # Try at the current elem we found in the file
                    try:
                        self.log("Checking for deeper local match %r from scoperef %r",
                                 lpath[1:], scoperef)
                        check_elem = elem
                        for p in lpath[1:]:  # we matched first token already
                            check_elem = check_elem.names[p]
                        check_scoperef = (scoperef[0],
                                          scoperef[1] + list(lpath[:-1]))
                        hits.insert(0, (check_elem,
                                        check_scoperef))
                        self.log("_hit_from_first_part: found deeper local elem: "
                                 "%s %r at %r",
                                 check_elem.get("ilk") or check_elem.tag,
                                 check_elem.get("name"),
                                 check_scoperef[1])
                    except KeyError:
                        pass
                for lib in self.libs:
                    self.log("lookup '%s' in %s", '.'.join(lpath), lib)
                    hits_here = lib.hits_from_lpath(lpath, self.ctlr,
                                                    curr_buf=self.buf)
                    if hits_here:
                        self.log("found %d hits in lib", len(hits_here))
                        hits += hits_here
                if hits:
                    break

            if elem is not None:
                if not hits or nconsumed == 1:
                    # Fall back to (or keep) the local hit itself.
                    if (elem, scoperef) not in hits:
                        hits.insert(0, (elem, scoperef))
                    nconsumed = 1
                else:
                    # Hits were found in the libs that are deeper than
                    # the hit in the local buf: we need to adjust the
                    # local hit.
                    new_elem = elem
                    for token in tokens[1:nconsumed]:
                        try:
                            new_elem = new_elem.names[token]
                        except KeyError:
                            break
                    else:
                        if new_elem not in (e for e, sr in hits):
                            # NOTE(review): this scoperef drops the
                            # original scoperef[1] prefix -- looks
                            # suspicious; confirm against the scoperef
                            # conventions of hits_from_lpath().
                            new_scoperef = (scoperef[0], tokens[:nconsumed-1])
                            hits.insert(0, (new_elem, new_scoperef))
        else:
            hits = [(elem, scoperef)]
            nconsumed = 1

        return hits, nconsumed
def _hits_from_commonjs_require(self, requirename, scoperef):
"""Resolve hits from a CommonJS require() invocation"""
# Files usually end with a ".js" suffix, though others are like
# ".node" are possible.
#
# TODO: Get these from node using "require.extensions".
requirename += ".js"
from codeintel2.database.langlib import LangDirsLib
from codeintel2.database.multilanglib import MultiLangDirsLib
from codeintel2.database.catalog import CatalogLib
hits = []
for lib in self.libs:
blobs = None
if isinstance(lib, (LangDirsLib, MultiLangDirsLib)):
blobs = lib.blobs_with_basename(requirename, ctlr=self.ctlr)
elif isinstance(lib, CatalogLib):
blob = lib.get_blob(requirename)
if blob is not None:
blobs = [blob]
for blob in blobs or []:
exports = blob.names.get("exports")
if exports is not None and exports.tag == "variable":
hits += self._hits_from_variable_type_inference(
exports, [blob, ["exports"]])
else:
self.log(
"Exported exports to be a variable, got %r instead", exports)
return hits
## n-char trigger completions ##
def _completion_names_from_scope(self, expr, scoperef):
"""Return all available element names beginning with expr"""
self.log("_completion_names_from_scope:: %r, scoperef: %r",
expr, scoperef)
# global_blob = self._elem_from_scoperef(self._get_global_scoperef(scoperef))
# Get all of the imports
# Keep a dictionary of completions.
all_completions = {}
# We start off having JS keywords at a bare minimum.
keywords = self.langintel.langinfo.keywords
for name in keywords:
if not expr or name.startswith(expr):
all_completions[name] = "keyword"
# From the local scope, walk up the parent chain including matches as
# we go.
# XXX - Can we skip the global (stdlib) blob in here?
loopcount = -1
while scoperef and scoperef[0] is not None:
loopcount += 1
# Iterate over the contents of the scope.
self.log("_completion_names_from_scope:: checking scoperef: %r",
scoperef)
elem = self._elem_from_scoperef(scoperef)
if elem is None:
continue
for name in elem.names:
# self.log("_completion_names_from_scope:: checking name: %r",
# name)
if name and name.startswith(expr):
if name not in all_completions:
hit_elem = elem.names[name]
if loopcount and "__local__" in hit_elem.get("attributes", "").split():
# Skip things that should only be local to the
# original scope.
# self.log("_completion_names_from_scope:: skipping local %r",
# name)
continue
all_completions[name] = hit_elem.get(
"ilk") or hit_elem.tag
# Continue walking up the scope chain...
scoperef = self.parent_scoperef_from_scoperef(scoperef)
# Builtins
# Find the matching names (or all names if no expr)
cplns = self.stdlib.toplevel_cplns(prefix=expr)
for ilk, name in cplns:
if name not in all_completions:
all_completions[name] = ilk
# "Import everything", iterate over all known libs
for lib in self.libs:
# Find the matching names (or all names if no expr)
self.log("_completion_names_from_scope:: include everything from "
"lib: %r", lib)
cplns = lib.toplevel_cplns(prefix=expr)
for ilk, name in cplns:
if name not in all_completions:
all_completions[name] = ilk
return [(ilk, name) for name, ilk in list(all_completions.items())]
| {
"content_hash": "10effefb5c9cca3000d303fcfdddbde6",
"timestamp": "",
"source": "github",
"line_count": 968,
"max_line_length": 98,
"avg_line_length": 44.256198347107436,
"alnum_prop": 0.4935574229691877,
"repo_name": "anisku11/sublimeku",
"id": "6b50b38c3c5a0e4ede690b0f115a501104ea38f2",
"size": "44546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Packages/CodeComplice/libs/codeintel2/tree_javascript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17282"
},
{
"name": "Go",
"bytes": "5456"
},
{
"name": "HTML",
"bytes": "4990"
},
{
"name": "JavaScript",
"bytes": "1714"
},
{
"name": "PHP",
"bytes": "43942"
},
{
"name": "Python",
"bytes": "9058676"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
# Minimal test-fixture package metadata.
_metadata = dict(
    name="b_py",
    version="0.1",
    description="a test",
    url="https://example.com",
    py_modules=['b'],
)
setup(**_metadata)
| {
"content_hash": "eaa3c05f6fc6c0dc2cfbaef3c89e07fd",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 43,
"avg_line_length": 20.22222222222222,
"alnum_prop": 0.6043956043956044,
"repo_name": "dmerejkowsky/qibuild",
"id": "d64ba264397eb0023030a9fd248d5691b04ae865",
"size": "355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qipkg/test/projects/b_py/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7442"
},
{
"name": "C++",
"bytes": "22059"
},
{
"name": "CMake",
"bytes": "267118"
},
{
"name": "Java",
"bytes": "4132"
},
{
"name": "Makefile",
"bytes": "2222"
},
{
"name": "Nix",
"bytes": "563"
},
{
"name": "Python",
"bytes": "1145711"
},
{
"name": "Shell",
"bytes": "1085"
}
],
"symlink_target": ""
} |
import asyncio
async def open_file(name):
    """Open *name* for reading and return the file object.

    Rewritten as a native coroutine: ``@asyncio.coroutine`` was removed
    in Python 3.11, and ``asyncio.async`` is a syntax error since 3.7
    (``async`` became a keyword).
    """
    print("opening {}".format(name))
    return open(name)
async def close_file(file):
    """Close an open file object, announcing it first.

    Native coroutine replacing the removed ``@asyncio.coroutine`` form.
    """
    print("closing {}".format(file.name))
    file.close()
async def read_data(file):
    """Read and return the entire contents of an open file object.

    Native coroutine replacing the removed ``@asyncio.coroutine`` form.
    """
    print("reading {}".format(file.name))
    return file.read()
async def process_data(filename):
    """Open, read and close *filename*, reporting progress.

    Each step is wrapped in a Task via ``asyncio.ensure_future`` (the
    successor of the removed ``asyncio.async``) and awaited, so the
    event loop is free to run other work while we wait.
    """
    # I want the result from open_file(filename);
    # until it's done don't bother calling me.
    file = await asyncio.ensure_future(open_file(filename))
    print('opened {}'.format(filename))
    # I want the result from read_data(file);
    # until it's done don't bother calling me.
    data = await asyncio.ensure_future(read_data(file))  # noqa: demo only
    print('read {}'.format(filename))
    await close_file(file)
async def main_coro(loop):
    """Fan out several file-processing tasks, poll until all complete,
    then stop the given event loop.

    Uses ``asyncio.ensure_future`` in place of the removed
    ``asyncio.async``.
    """
    # Start our tasks asynchronously in futures.
    tasks = [
        asyncio.ensure_future(process_data('/etc/passwd')),
        asyncio.ensure_future(process_data('/etc/group')),
        asyncio.ensure_future(process_data('/var/log/Xorg.0.log')),
    ]
    # Until all futures are done...
    while not all(task.done() for task in tasks):
        # take a short nap
        await asyncio.sleep(0.01)
    # We're done, so stop the event loop.
    loop.stop()
# Get the event loop.
loop = asyncio.get_event_loop()
# Schedule the main coroutine to start as soon as possible.
# (asyncio.ensure_future replaces the removed asyncio.async, which is a
# syntax error on Python 3.7+.)
loop.call_soon(asyncio.ensure_future, main_coro(loop))
# Run until explicitly stopped (main_coro() calls loop.stop()).
loop.run_forever()
# Instead of the above two lines we can also run
# loop.run_until_complete(main_coro()) and remove
# the loop parameter for main_coro and the call
# to loop.stop() at the end of it
loop.close()
| {
"content_hash": "f2ba78f5be85bf93fa84ed772df96454",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 59,
"avg_line_length": 25.661538461538463,
"alnum_prop": 0.6780575539568345,
"repo_name": "voidabhi/python-scripts",
"id": "c8e39ba3cef7514c1f114572c359dffa5447d22d",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/asyncio-coroutine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "446"
},
{
"name": "Go",
"bytes": "330"
},
{
"name": "JavaScript",
"bytes": "1728"
},
{
"name": "Python",
"bytes": "282732"
},
{
"name": "Shell",
"bytes": "794"
}
],
"symlink_target": ""
} |
"""Tests for Plex setup."""
import copy
from datetime import timedelta
import ssl
from unittest.mock import patch
import plexapi
import requests
import homeassistant.components.plex.const as const
from homeassistant.components.plex.models import (
LIVE_TV_SECTION,
TRANSIENT_SECTION,
UNKNOWN_SECTION,
)
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import (
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
STATE_IDLE,
STATE_PLAYING,
)
import homeassistant.util.dt as dt_util
from .const import DEFAULT_DATA, DEFAULT_OPTIONS, PLEX_DIRECT_URL
from .helpers import trigger_plex_update, wait_for_debouncer
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_set_config_entry_unique_id(hass, entry, mock_plex_server):
    """Test updating missing unique_id from config entry."""
    entries = hass.config_entries.async_entries(const.DOMAIN)
    assert len(entries) == 1
    assert entry.state is ConfigEntryState.LOADED
    # The unique_id should have been backfilled from the server.
    assert entries[0].unique_id == mock_plex_server.machine_identifier
async def test_setup_config_entry_with_error(hass, entry):
    """Test setup component from config entry with errors."""
    connect_target = "homeassistant.components.plex.PlexServer.connect"

    # First attempt: connection error -> transient retry state.
    with patch(
        connect_target, side_effect=requests.exceptions.ConnectionError
    ):
        entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(entry.entry_id) is False
        await hass.async_block_till_done()
        assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
        assert entry.state is ConfigEntryState.SETUP_RETRY

    # Retry fires with a fatal API error -> permanent error state.
    with patch(connect_target, side_effect=plexapi.exceptions.BadRequest):
        retry_time = dt_util.utcnow() + timedelta(seconds=30)
        async_fire_time_changed(hass, retry_time)
        await hass.async_block_till_done()
        assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
        assert entry.state is ConfigEntryState.SETUP_ERROR
async def test_setup_with_insecure_config_entry(hass, entry, setup_plex_server):
    """Test setup component with config."""
    # Disable SSL verification in a copy of the default entry data.
    insecure_data = copy.deepcopy(DEFAULT_DATA)
    insecure_data[const.PLEX_SERVER_CONFIG][CONF_VERIFY_SSL] = False
    entry.data = insecure_data

    await setup_plex_server(config_entry=entry)

    assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
async def test_unload_config_entry(hass, entry, mock_plex_server):
    """Test unloading a config entry."""
    config_entries = hass.config_entries.async_entries(const.DOMAIN)
    assert len(config_entries) == 1
    assert entry is config_entries[0]
    assert entry.state is ConfigEntryState.LOADED

    server_id = mock_plex_server.machine_identifier
    domain_data = hass.data[const.DOMAIN]
    assert domain_data[const.SERVERS][server_id] == mock_plex_server
    websocket = domain_data[const.WEBSOCKETS][server_id]

    await hass.config_entries.async_unload(entry.entry_id)
    # Unloading must close the websocket and mark the entry unloaded.
    assert websocket.close.called
    assert entry.state is ConfigEntryState.NOT_LOADED
async def test_setup_with_photo_session(hass, entry, setup_plex_server):
    """Test setup component with config."""
    await setup_plex_server(session_type="photo")

    assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
    await hass.async_block_till_done()

    # Photo sessions are not treated as active playback.
    player = hass.states.get(
        "media_player.plex_plex_for_android_tv_shield_android_tv"
    )
    assert player.state == STATE_IDLE

    await wait_for_debouncer(hass)
    assert hass.states.get("sensor.plex_plex_server_1").state == "0"
async def test_setup_with_live_tv_session(hass, entry, setup_plex_server):
    """Test setup component with a Live TV session."""
    await setup_plex_server(session_type="live_tv")

    assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
    await hass.async_block_till_done()

    player = hass.states.get(
        "media_player.plex_plex_for_android_tv_shield_android_tv"
    )
    assert player.state == STATE_PLAYING
    assert player.attributes["media_library_title"] == LIVE_TV_SECTION

    await wait_for_debouncer(hass)
    assert hass.states.get("sensor.plex_plex_server_1").state == "1"
async def test_setup_with_transient_session(hass, entry, setup_plex_server):
    """Test setup component with a transient session."""
    await setup_plex_server(session_type="transient")

    assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
    await hass.async_block_till_done()

    player = hass.states.get(
        "media_player.plex_plex_for_android_tv_shield_android_tv"
    )
    assert player.state == STATE_PLAYING
    assert player.attributes["media_library_title"] == TRANSIENT_SECTION

    await wait_for_debouncer(hass)
    assert hass.states.get("sensor.plex_plex_server_1").state == "1"
async def test_setup_with_unknown_session(hass, entry, setup_plex_server):
    """Test setup component with an unknown session."""
    await setup_plex_server(session_type="unknown")

    assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
    await hass.async_block_till_done()

    player = hass.states.get(
        "media_player.plex_plex_for_android_tv_shield_android_tv"
    )
    assert player.state == STATE_PLAYING
    assert player.attributes["media_library_title"] == UNKNOWN_SECTION

    await wait_for_debouncer(hass)
    assert hass.states.get("sensor.plex_plex_server_1").state == "1"
async def test_setup_when_certificate_changed(
    hass,
    requests_mock,
    empty_library,
    empty_payload,
    plex_server_accounts,
    plex_server_default,
    plextv_account,
    plextv_resources,
    plextv_shared_users,
):
    """Test setup component when the Plex certificate has changed."""
    # Simulate a server whose stored plex.direct URL no longer matches the
    # certificate hostname, forcing the integration to re-resolve the server.
    class WrongCertHostnameException(requests.exceptions.SSLError):
        """Mock the exception showing a mismatched hostname."""
        def __init__(self):
            self.__context__ = ssl.SSLCertVerificationError(
                f"hostname '{old_domain}' doesn't match"
            )
    # old_domain is a free variable of the class above; it is bound here,
    # before the exception is ever instantiated by requests_mock.
    old_domain = "1-2-3-4.1111111111ffffff1111111111ffffff.plex.direct"
    old_url = f"https://{old_domain}:32400"
    OLD_HOSTNAME_DATA = copy.deepcopy(DEFAULT_DATA)
    OLD_HOSTNAME_DATA[const.PLEX_SERVER_CONFIG][CONF_URL] = old_url
    old_entry = MockConfigEntry(
        domain=const.DOMAIN,
        data=OLD_HOSTNAME_DATA,
        options=DEFAULT_OPTIONS,
        unique_id=DEFAULT_DATA["server_id"],
    )
    requests_mock.get("https://plex.tv/api/users/", text=plextv_shared_users)
    requests_mock.get("https://plex.tv/api/invites/requested", text=empty_payload)
    # Every hit on the stored URL raises the hostname-mismatch SSL error.
    requests_mock.get(old_url, exc=WrongCertHostnameException)
    # Test with account failure
    requests_mock.get("https://plex.tv/users/account", status_code=401)
    old_entry.add_to_hass(hass)
    assert await hass.config_entries.async_setup(old_entry.entry_id) is False
    await hass.async_block_till_done()
    assert old_entry.state is ConfigEntryState.SETUP_ERROR
    await hass.config_entries.async_unload(old_entry.entry_id)
    # Test with no servers found
    requests_mock.get("https://plex.tv/users/account", text=plextv_account)
    requests_mock.get("https://plex.tv/api/resources", text=empty_payload)
    assert await hass.config_entries.async_setup(old_entry.entry_id) is False
    await hass.async_block_till_done()
    assert old_entry.state is ConfigEntryState.SETUP_ERROR
    await hass.config_entries.async_unload(old_entry.entry_id)
    # Test with success
    new_url = PLEX_DIRECT_URL
    requests_mock.get("https://plex.tv/api/resources", text=plextv_resources)
    for resource_url in [new_url, "http://1.2.3.4:32400"]:
        requests_mock.get(resource_url, text=plex_server_default)
    requests_mock.get(f"{new_url}/accounts", text=plex_server_accounts)
    requests_mock.get(f"{new_url}/library", text=empty_library)
    requests_mock.get(f"{new_url}/library/sections", text=empty_payload)
    assert await hass.config_entries.async_setup(old_entry.entry_id)
    await hass.async_block_till_done()
    # The entry should now point at the freshly resolved plex.direct URL.
    assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
    assert old_entry.state is ConfigEntryState.LOADED
    assert old_entry.data[const.PLEX_SERVER_CONFIG][CONF_URL] == new_url
async def test_tokenless_server(entry, setup_plex_server):
    """Test setup with a server with token auth disabled."""
    # Build entry data that carries no auth token.
    tokenless_data = copy.deepcopy(DEFAULT_DATA)
    tokenless_data[const.PLEX_SERVER_CONFIG].pop(CONF_TOKEN, None)
    entry.data = tokenless_data

    await setup_plex_server(config_entry=entry)
    assert entry.state is ConfigEntryState.LOADED
async def test_bad_token_with_tokenless_server(
    hass, entry, mock_websocket, setup_plex_server, requests_mock
):
    """Test setup with a bad token and a server with token auth disabled."""
    # plex.tv account lookups fail, but the local server does not need them.
    requests_mock.get("https://plex.tv/users/account", status_code=401)

    await setup_plex_server()
    assert entry.state is ConfigEntryState.LOADED

    # Account-dependent updates should be a quiet no-op.
    trigger_plex_update(mock_websocket)
    await hass.async_block_till_done()
| {
"content_hash": "f39d66c30b028f797b9adab56aa60700",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 82,
"avg_line_length": 35.20879120879121,
"alnum_prop": 0.712234706616729,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "c9bcce0ac83f6e5398c629ecc3defe903356d2b7",
"size": "9612",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/plex/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from devstack import component as comp
from devstack import log as logging
LOG = logging.getLogger("devstack.components.nova_client")
class NovaClientUninstaller(comp.PythonUninstallComponent):
    """Uninstall handler for the python-novaclient component."""

    def __init__(self, *args, **kwargs):
        # Delegate straight to the generic python uninstall logic.
        comp.PythonUninstallComponent.__init__(self, *args, **kwargs)
class NovaClientInstaller(comp.PythonInstallComponent):
    """Install handler for the python-novaclient component."""

    def __init__(self, *args, **kwargs):
        comp.PythonInstallComponent.__init__(self, *args, **kwargs)

    def _get_download_locations(self):
        # Single download source: repo uri and branch come from the
        # "git" config section.
        return [{
            'uri': ("git", "novaclient_repo"),
            'branch': ("git", "novaclient_branch"),
        }]
class NovaClientRuntime(comp.EmptyRuntime):
    """Runtime handler: novaclient is a library, so there is nothing to run."""

    def __init__(self, *args, **kwargs):
        comp.EmptyRuntime.__init__(self, *args, **kwargs)
| {
"content_hash": "d36e4053c689ad9a20ce2e6ad8e06988",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 30.88888888888889,
"alnum_prop": 0.6462829736211031,
"repo_name": "hagleitn/Openstack-Devstack2",
"id": "fcabd713059a34dbba2d21d9756ec5925d128183",
"size": "1511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devstack/components/nova_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "322122"
},
{
"name": "Shell",
"bytes": "19341"
}
],
"symlink_target": ""
} |
import io
from PyPDF2 import PdfFileReader, PdfFileWriter
def diff_pdf_pages(pdf1_path, pdf2_path):
    """Return the 0-based indices of pages that differ between two PDFs.

    Args:
        pdf1_path: raw bytes of the old PDF (despite the name this is the
            document *content*, not a filesystem path — it is fed to
            io.BytesIO), or a falsy value meaning "no previous version",
            in which case every page of the new PDF is reported.
        pdf2_path: raw bytes of the new PDF.

    Returns:
        list of page indices whose serialized content differs, including
        pages that exist in only one of the two documents.
    """
    pdf2_fp = PdfFileReader(io.BytesIO(pdf2_path))
    pdf2_len = pdf2_fp.getNumPages()
    if not pdf1_path:
        # No baseline: every page of the new document counts as different.
        return list(range(0, pdf2_len))
    pdf1_fp = PdfFileReader(io.BytesIO(pdf1_path))
    pdf1_len = pdf1_fp.getNumPages()
    list_differents = list()
    # BUG FIX: iterate over the longer document so that pages *added* in
    # pdf2 (indices >= pdf1_len) are reported too; the original loop only
    # covered range(pdf1_len) and silently ignored appended pages.
    for i in range(max(pdf1_len, pdf2_len)):
        if i >= pdf1_len or i >= pdf2_len:
            # The page exists in only one of the documents.
            list_differents.append(i)
            continue
        # Serialize each page in isolation and compare the raw bytes.
        output1 = PdfFileWriter()
        output2 = PdfFileWriter()
        output1.addPage(pdf1_fp.getPage(i))
        output2.addPage(pdf2_fp.getPage(i))
        fp1 = io.BytesIO()
        fp2 = io.BytesIO()
        output1.write(fp1)
        output2.write(fp2)
        fp1.seek(0)
        fp2.seek(0)
        if fp1.read() != fp2.read():
            list_differents.append(i)
    return list_differents
| {
"content_hash": "b2e0dd2ccd73847f6e2c5f55cbc8fd97",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 50,
"avg_line_length": 24.916666666666668,
"alnum_prop": 0.5964325529542921,
"repo_name": "patryk4815/wtie_utp_plan",
"id": "f036865dfd9c078061450214bbc244dc9ba27d55",
"size": "897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_utils/pdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4317"
}
],
"symlink_target": ""
} |
"""IPVS module
This module exists as a pure-python replacement for ipvsadm.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import socket
import struct
import utils.cpuload.netlink.netlink as netlink
# IPVS forwarding methods
IPVS_MASQUERADING = 0
IPVS_LOCAL = 1
IPVS_TUNNELING = 2
IPVS_ROUTING = 3
# The set of valid values for a dest's fwd_method (asserted by
# Dest.validate below).  NOTE(review): values presumably mirror the
# kernel's ip_vs forwarding constants — confirm against ip_vs.h.
IPVS_METHODS = set([
    IPVS_MASQUERADING,
    IPVS_LOCAL,
    IPVS_TUNNELING,
    IPVS_ROUTING
])
# These are attr_list_types which are nestable. The command attribute list
# is ultimately referenced by the messages which are passed down to the
# kernel via netlink. These structures must match the type and ordering
# that the kernel expects.
# Traffic/connection counters attached to services and dests (u32 variant).
IpvsStatsAttrList = netlink.create_attr_list_type(
    'IpvsStatsAttrList',
    ('CONNS', netlink.U32Type),
    ('INPKTS', netlink.U32Type),
    ('OUTPKTS', netlink.U32Type),
    ('INBYTES', netlink.U64Type),
    ('OUTBYTES', netlink.U64Type),
    ('CPS', netlink.U32Type),
    ('INPPS', netlink.U32Type),
    ('OUTPPS', netlink.U32Type),
    ('INBPS', netlink.U32Type),
    ('OUTBPS', netlink.U32Type),
)
# Same counters with every field widened to u64 (the STATS64 attribute).
IpvsStatsAttrList64 = netlink.create_attr_list_type(
    'IpvsStatsAttrList64',
    ('CONNS', netlink.U64Type),
    ('INPKTS', netlink.U64Type),
    ('OUTPKTS', netlink.U64Type),
    ('INBYTES', netlink.U64Type),
    ('OUTBYTES', netlink.U64Type),
    ('CPS', netlink.U64Type),
    ('INPPS', netlink.U64Type),
    ('OUTPPS', netlink.U64Type),
    ('INBPS', netlink.U64Type),
    ('OUTBPS', netlink.U64Type),
)
# Attributes of a virtual service (addressed by af/proto/addr/port or FWMARK).
IpvsServiceAttrList = netlink.create_attr_list_type(
    'IpvsServiceAttrList',
    ('AF', netlink.U16Type),
    ('PROTOCOL', netlink.U16Type),
    ('ADDR', netlink.BinaryType),
    ('PORT', netlink.Net16Type),
    ('FWMARK', netlink.U32Type),
    ('SCHED_NAME', netlink.NulStringType),
    ('FLAGS', netlink.BinaryType),
    ('TIMEOUT', netlink.U32Type),
    ('NETMASK', netlink.U32Type),
    ('STATS', IpvsStatsAttrList),
    ('PE_NAME', netlink.NulStringType),
    ('STATS64', IpvsStatsAttrList64),
)
# Attributes of a real server (destination) behind a service.
IpvsDestAttrList = netlink.create_attr_list_type(
    'IpvsDestAttrList',
    ('ADDR', netlink.BinaryType),
    ('PORT', netlink.Net16Type),
    ('FWD_METHOD', netlink.U32Type),
    ('WEIGHT', netlink.I32Type),
    ('U_THRESH', netlink.U32Type),
    ('L_THRESH', netlink.U32Type),
    ('ACTIVE_CONNS', netlink.U32Type),
    ('INACT_CONNS', netlink.U32Type),
    ('PERSIST_CONNS', netlink.U32Type),
    ('STATS', IpvsStatsAttrList),
    ('ADDR_FAMILY', netlink.U16Type),
    ('STATS64', IpvsStatsAttrList64),
)
# Attributes of the connection-sync daemon.
IpvsDaemonAttrList = netlink.create_attr_list_type(
    'IpvsDaemonAttrList',
    ('STATE', netlink.U32Type),
    ('MCAST_IFN', netlink.NulStringType),
    ('SYNC_ID', netlink.U32Type),
)
# Global IPVS info returned by GET_INFO.
IpvsInfoAttrList = netlink.create_attr_list_type(
    'IpvsInfoAttrList',
    ('VERSION', netlink.U32Type),
    ('CONN_TAB_SIZE', netlink.U32Type),
)
# Top-level command attribute list carried by every IPVS genl message.
IpvsCmdAttrList = netlink.create_attr_list_type(
    'IpvsCmdAttrList',
    ('SERVICE', IpvsServiceAttrList),
    ('DEST', IpvsDestAttrList),
    ('DAEMON', IpvsDaemonAttrList),
    ('TIMEOUT_TCP', netlink.U32Type),
    ('TIMEOUT_TCP_FIN', netlink.U32Type),
    ('TIMEOUT_UDP', netlink.U32Type),
)
# The generic-netlink message family; command order here must match the
# kernel's command enumeration (see the comment block above).
IpvsMessage = netlink.create_genl_message_type(
    'IpvsMessage', 'IPVS',
    ('NEW_SERVICE', IpvsCmdAttrList),
    ('SET_SERVICE', IpvsCmdAttrList),
    ('DEL_SERVICE', IpvsCmdAttrList),
    ('GET_SERVICE', IpvsCmdAttrList),
    ('NEW_DEST', IpvsCmdAttrList),
    ('SET_DEST', IpvsCmdAttrList),
    ('DEL_DEST', IpvsCmdAttrList),
    ('GET_DEST', IpvsCmdAttrList),
    ('NEW_DAEMON', IpvsCmdAttrList),
    ('DEL_DAEMON', IpvsCmdAttrList),
    ('GET_DAEMON', IpvsCmdAttrList),
    ('SET_CONFIG', IpvsCmdAttrList),
    ('GET_CONFIG', IpvsCmdAttrList),
    ('SET_INFO', IpvsCmdAttrList),
    ('GET_INFO', IpvsCmdAttrList),
    ('ZERO', IpvsCmdAttrList),
    ('FLUSH', IpvsCmdAttrList),
    required_modules=['ip_vs'],
)
def verbose(f):
    """Decorator: when ``self.verbose`` is truthy, print each call's
    name and arguments before executing it."""
    def wrapper(self, *args, **kwargs):
        if self.verbose:
            shown = [repr(a) for a in args]
            shown.extend(['{0}={1}'.format(k, repr(v))
                          for k, v in six.iteritems(kwargs)])
            print('{0}({1})'.format(f.__name__, ', '.join(shown)))
        return f(self, *args, **kwargs)
    return wrapper
def _validate_ip(ip):
    """Return True when *ip* parses as an address of its inferred family."""
    try:
        socket.inet_pton(_to_af(ip), ip)
    except socket.error:
        return False
    return True
def _to_af(ip):
return socket.AF_INET6 if ':' in ip else socket.AF_INET
def _to_af_union(ip):
    """Return (family, packed address zero-padded to 16 bytes) for *ip*."""
    family = _to_af(ip)
    packed = socket.inet_pton(family, ip).ljust(16, b'\0')
    return family, packed
def _from_af_union(af, addr):
n = 4 if af == socket.AF_INET else 16
return socket.inet_ntop(af, addr[:n])
def _to_proto_num(proto):
if proto is None:
return None
if proto.lower() == 'tcp':
return socket.IPPROTO_TCP
elif proto.lower() == 'udp':
return socket.IPPROTO_UDP
else:
assert False, 'unknown proto %s' % proto
def _from_proto_num(n):
if n is None:
return None
if n == socket.IPPROTO_TCP:
return 'tcp'
elif n == socket.IPPROTO_UDP:
return 'udp'
else:
assert False, 'unknown proto num %d' % n
class Dest(object):
    """Describes a real server (destination) to be load balanced to.

    NOTE(review): unlike Service, the ``validate`` constructor flag is
    accepted but never acted on here — confirm whether validation was
    meant to run on construction.
    """

    def __init__(self, d={}, validate=False):
        self.ip_ = d.get('ip', None)
        self.weight_ = d.get('weight', None)
        self.port_ = d.get('port', None)
        # Forwarding method defaults to tunneling.
        self.fwd_method_ = d.get('fwd_method', IPVS_TUNNELING)

    def __repr__(self):
        return 'Dest(d=dict(ip="%s", weight=%d))' % (self.ip(), self.weight())

    def ip(self):
        return self.ip_

    def weight(self):
        return self.weight_

    def port(self):
        return self.port_

    def fwd_method(self):
        return self.fwd_method_

    def validate(self):
        """Assert basic invariants on ip, weight and forwarding method."""
        assert _validate_ip(self.ip_)
        assert isinstance(self.weight_, int)
        assert self.weight_ >= -1
        assert self.fwd_method_ in IPVS_METHODS

    def to_dict(self):
        # Only ip and weight participate in the dict form (and equality).
        return {
            'ip': self.ip_,
            'weight': self.weight_,
        }

    def to_attr_list(self):
        family, packed = _to_af_union(self.ip_)
        return IpvsDestAttrList(addr_family=family,
                                addr=packed,
                                port=self.port_,
                                fwd_method=self.fwd_method_)

    def __eq__(self, other):
        if not isinstance(other, Dest):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        return not self.__eq__(other)

    @staticmethod
    def from_attr_list(lst, default_af=None):
        """Build a Dest from a kernel attr list, falling back to default_af
        when the dest carries no addr_family of its own."""
        family = lst.get('addr_family', default_af)
        d = {
            'ip': _from_af_union(family, lst.get('addr')),
            'weight': lst.get('weight'),
            'port': lst.get('port'),
            'fwd_method': lst.get('fwd_method'),
        }
        return Dest(d=d, validate=True)
class Service(object):
    """Describes a load balanced (virtual) service.

    A service is addressed either by a (proto, vip, port) triple or by a
    firewall mark (fwmark); exactly one form is valid at a time.
    """

    def __init__(self, d={}, validate=False):
        self.proto_ = d.get('proto', None)
        self.vip_ = d.get('vip', None)
        self.port_ = d.get('port', None)
        self.sched_ = d.get('sched', None)
        self.fwmark_ = d.get('fwmark', None)
        # Address family defaults to the family inferred from the VIP.
        if self.vip_:
            self.af_ = d.get('af', _to_af(self.vip_))
        else:
            self.af_ = d.get('af', None)
        if validate:
            self.validate()

    def __repr__(self):
        if self.fwmark_ is None:
            return 'Service(d=dict(proto="%s", vip="%s", port=%d, sched="%s"))' % (
                self.proto(), self.vip(), self.port(), self.sched())
        return 'Service(d=dict(fwmark=%d, sched="%s", af="%s"))' % (
            self.fwmark(), self.sched(), self.af())

    def af(self):
        return self.af_

    def fwmark(self):
        return self.fwmark_

    def proto(self):
        return self.proto_

    def proto_num(self):
        # Numeric IPPROTO_* value for the textual protocol (or None).
        return _to_proto_num(self.proto_)

    def port(self):
        return self.port_

    def vip(self):
        return self.vip_

    def sched(self):
        return self.sched_

    def validate(self):
        """Assert the service is a consistent vip-form or fwmark-form."""
        assert self.af_ in [socket.AF_INET, socket.AF_INET6]
        if self.vip_ or self.port_ or self.proto_:
            # vip-form: a full (proto, vip, port) triple and no fwmark.
            assert self.proto_.lower() in ['tcp', 'udp']
            assert _validate_ip(self.vip_)
            assert isinstance(self.port_, int)
            assert self.port_ > 0 and self.port_ < (2 ** 16)
            assert self.fwmark_ is None
        else:
            # fwmark-form: a bare 32-bit firewall mark only.
            assert isinstance(self.fwmark_, int)
            assert self.proto_ is None
            assert self.port_ is None
            assert self.vip_ is None
            assert self.fwmark_ > 0 and self.fwmark_ < (2 ** 32)

    def to_dict(self):
        self.validate()
        if self.fwmark_ is not None:
            return {
                'fwmark': self.fwmark_,
                'sched': self.sched_,
                'af': self.af_
            }
        return {
            'proto': self.proto_,
            'vip': self.vip_,
            'port': self.port_,
            'sched': self.sched_,
            'af': self.af_
        }

    def to_attr_list(self):
        # The flags payload is two zeroed native-endian u32 words.
        zero_flags = struct.pack(str('=II'), 0, 0)
        if self.fwmark_ is None:
            af, addr = _to_af_union(self.vip_)
            netmask = ((1 << 32) - 1) if af == socket.AF_INET else 128
            return IpvsServiceAttrList(af=af, addr=addr,
                                       protocol=self.proto_num(),
                                       netmask=netmask, port=self.port_,
                                       sched_name=self.sched_,
                                       flags=zero_flags)
        return IpvsServiceAttrList(fwmark=self.fwmark_, af=self.af_,
                                   netmask=((1 << 32) - 1),
                                   sched_name=self.sched_,
                                   flags=zero_flags)

    def __eq__(self, other):
        if not isinstance(other, Service):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        return not self.__eq__(other)

    @staticmethod
    def from_attr_list(lst):
        if lst.get('addr', None) is None:
            d = dict(
                fwmark=lst.get('fwmark'),
                sched=lst.get('sched_name'),
                af=lst.get('af'),
            )
        else:
            d = dict(
                vip=_from_af_union(lst.get('af'), lst.get('addr')),
                proto=_from_proto_num(lst.get('protocol')),
                port=lst.get('port'),
                sched=lst.get('sched_name'),
                af=lst.get('af'),
            )
        return Service(d=d, validate=True)
class Pool(object):
    """Pairs one Service with the list of Dests that back it."""

    def __init__(self, d={}, validate=False):
        self.service_ = Service(d.get('service', {}), validate)
        self.dests_ = [Dest(x, validate) for x in d.get('dests', [])]

    def service(self):
        return self.service_

    def dests(self):
        return self.dests_

    def validate(self):
        """Validate the service and every dest behind it."""
        self.service_.validate()
        for dest in self.dests_:
            dest.validate()

    def to_dict(self):
        self.validate()
        return {
            'service': self.service_.to_dict(),
            'dests': [d.to_dict() for d in self.dests_],
        }

    @staticmethod
    def from_args(service=None, dests=[]):
        """Build a Pool from already-constructed Service/Dest objects."""
        assert isinstance(service, Service)
        assert isinstance(dests, list)
        pool = Pool()
        pool.service_ = service
        pool.dests_ = dests
        return pool

    @staticmethod
    def load_pools_from_json_list(lst):
        # Each entry is validated as it is constructed.
        return [Pool(i, True) for i in lst]
class IpvsClient(object):
    """A python client to use instead of shelling out to ipvsadm
    """
    def __init__(self, verbose=False):
        # verbose is read by the @verbose decorator to trace every call.
        self.verbose = verbose
        self.nlsock = netlink.NetlinkSocket(verbose=verbose)
    def __modify_service(self, method, vip, port, protocol, **svc_kwargs):
        """Send a service-level IPVS command ('new_service', 'del_service', ...)."""
        af, addr = _to_af_union(vip)
        # Full host mask: /32 for IPv4, 128 for IPv6.
        netmask = ((1 << 32) - 1) if af == socket.AF_INET else 128
        out_msg = IpvsMessage(
            method, flags=netlink.MessageFlags.ACK_REQUEST,
            attr_list=IpvsCmdAttrList(
                service=IpvsServiceAttrList(
                    af=af,
                    port=port,
                    protocol=protocol,
                    addr=addr,
                    netmask=netmask,
                    flags=struct.pack(str('=II'), 0, 0),
                    **svc_kwargs
                )
            )
        )
        self.nlsock.execute(out_msg)
    @verbose
    def add_service(self, vip, port, protocol=socket.IPPROTO_TCP,
                    sched_name='rr'):
        """Create a (vip, port, protocol) virtual service."""
        self.__modify_service('new_service', vip, port, protocol,
                              sched_name=sched_name, timeout=0)
    @verbose
    def del_service(self, vip, port, protocol=socket.IPPROTO_TCP):
        """Delete a (vip, port, protocol) virtual service."""
        self.__modify_service('del_service', vip, port, protocol)
    def __modify_fwm_service(self, method, fwmark, af, **svc_kwargs):
        """Send a service-level command for a firewall-mark based service."""
        netmask = ((1 << 32) - 1) if af == socket.AF_INET else 128
        out_msg = IpvsMessage(
            method, flags=netlink.MessageFlags.ACK_REQUEST,
            attr_list=IpvsCmdAttrList(
                service=IpvsServiceAttrList(
                    fwmark=fwmark,
                    flags=struct.pack(str('=II'), 0, 0),
                    af=af,
                    netmask=netmask,
                    **svc_kwargs
                )
            )
        )
        self.nlsock.execute(out_msg)
    @verbose
    def add_fwm_service(self, fwmark, sched_name='rr', af=socket.AF_INET):
        """Create a virtual service keyed by firewall mark."""
        self.__modify_fwm_service('new_service', fwmark,
                                  sched_name=sched_name, timeout=0, af=af)
    @verbose
    def del_fwm_service(self, fwmark, af=socket.AF_INET):
        """Delete a firewall-mark based virtual service."""
        self.__modify_fwm_service('del_service', fwmark, af=af)
    def __modify_dest(self, method, vip, port, rip, rport=None,
                      protocol=socket.IPPROTO_TCP, **dest_kwargs):
        """Send a dest-level command scoped to a (vip, port) service."""
        vaf, vaddr = _to_af_union(vip)
        raf, raddr = _to_af_union(rip)
        # Real-server port defaults to the service port.
        rport = rport or port
        out_msg = IpvsMessage(
            method, flags=netlink.MessageFlags.ACK_REQUEST,
            attr_list=IpvsCmdAttrList(
                service=IpvsServiceAttrList(
                    af=vaf,
                    port=port,
                    protocol=protocol,
                    addr=vaddr,
                ),
                dest=IpvsDestAttrList(
                    addr_family=raf,
                    addr=raddr,
                    port=rport,
                    **dest_kwargs
                ),
            ),
        )
        self.nlsock.execute(out_msg)
    @verbose
    def add_dest(self, vip, port, rip, rport=None,
                 protocol=socket.IPPROTO_TCP, weight=1, method=IPVS_TUNNELING):
        """Add a real server to a (vip, port) service."""
        self.__modify_dest('new_dest', vip, port, rip, rport,
                           protocol=protocol, weight=weight,
                           fwd_method=method, l_thresh=0, u_thresh=0)
    @verbose
    def update_dest(self, vip, port, rip, rport=None,
                    protocol=socket.IPPROTO_TCP, weight=None,
                    method=IPVS_TUNNELING):
        """Update an existing real server (e.g. its weight)."""
        self.__modify_dest('set_dest', vip, port, rip, rport, protocol,
                           weight=weight, l_thresh=0, u_thresh=0,
                           fwd_method=method)
    @verbose
    def del_dest(self, vip, port, rip, rport=None,
                 protocol=socket.IPPROTO_TCP):
        """Remove a real server from a (vip, port) service."""
        self.__modify_dest('del_dest', vip, port, rip, rport, protocol)
    def __modify_fwm_dest(self, method, fwmark, rip, vaf, port,
                          **dest_kwargs):
        """Send a dest-level command scoped to a firewall-mark service."""
        raf, raddr = _to_af_union(rip)
        out_msg = IpvsMessage(
            method, flags=netlink.MessageFlags.ACK_REQUEST,
            attr_list=IpvsCmdAttrList(
                service=IpvsServiceAttrList(
                    fwmark=fwmark,
                    af=vaf,
                ),
                dest=IpvsDestAttrList(
                    addr_family=raf,
                    addr=raddr,
                    port=port,
                    **dest_kwargs
                ),
            ),
        )
        self.nlsock.execute(out_msg)
    @verbose
    def add_fwm_dest(self, fwmark, rip, vaf=socket.AF_INET, port=0, weight=1):
        """Add a real server to a firewall-mark service (fwd_method 2 == IPVS_TUNNELING)."""
        self.__modify_fwm_dest('new_dest', fwmark, rip, weight=weight,
                               port=port, vaf=vaf, l_thresh=0, u_thresh=0,
                               fwd_method=2)
    @verbose
    def update_fwm_dest(self, fwmark, rip, vaf=socket.AF_INET, weight=None,
                        port=0):
        """Update a real server on a firewall-mark service."""
        self.__modify_fwm_dest('set_dest', fwmark, rip, weight=weight,
                               vaf=vaf, port=port, l_thresh=0, u_thresh=0,
                               fwd_method=2)
    @verbose
    def del_fwm_dest(self, fwmark, rip, vaf=socket.AF_INET, port=0):
        """Remove a real server from a firewall-mark service."""
        self.__modify_fwm_dest('del_dest', fwmark, rip, vaf=vaf, port=port)
    def flush(self):
        """Drop every IPVS service and dest on this host."""
        out_msg = IpvsMessage('flush', flags=netlink.MessageFlags.ACK_REQUEST)
        self.nlsock.execute(out_msg)
    def get_pools(self):
        """
        Get all the pools configured
        """
        pools = []
        req = IpvsMessage(
            'get_service', flags=netlink.MessageFlags.MATCH_ROOT_REQUEST)
        for msg in self.nlsock.query(req):
            svc_lst = msg.get_attr_list().get('service')
            service = Service.from_attr_list(svc_lst)
            dests = self.get_dests(svc_lst)
            pools.append(Pool.from_args(
                service=service,
                dests=dests
            ))
        return pools
    def get_pool(self, svc_lst):
        """Return the Pool for one service attr list, or None if absent."""
        s = self.get_service(svc_lst)
        if s is None:
            return None
        dests = self.get_dests(s.to_attr_list())
        return Pool.from_args(service=s, dests=dests)
    def get_service(self, svc_lst):
        """Look up a single service; returns None when it does not exist."""
        out_msg = IpvsMessage(
            'get_service', flags=netlink.MessageFlags.REQUEST,
            attr_list=IpvsCmdAttrList(service=svc_lst))
        try:
            res = self.nlsock.query(out_msg)
            svc_lst = res[0].get_attr_list().get('service')
            return Service.from_attr_list(svc_lst)
        except RuntimeError:
            # If the query failed because the service is not present
            # simply return None
            return None
    def get_dests(self, svc_lst):
        """List the Dests of a service; None when the query fails."""
        assert isinstance(svc_lst, IpvsServiceAttrList)
        dests = []
        out_msg = IpvsMessage(
            'get_dest', flags=netlink.MessageFlags.MATCH_ROOT_REQUEST,
            attr_list=IpvsCmdAttrList(service=svc_lst)
        )
        try:
            for dst_msg in self.nlsock.query(out_msg):
                dst_lst = dst_msg.get_attr_list().get('dest')
                dests.append(Dest.from_attr_list(dst_lst, svc_lst.get('af')))
            return dests
        except RuntimeError:
            # Typically happens if the service is not defined
            return None
| {
"content_hash": "27dd5182af887c2a87be12efa1efc894",
"timestamp": "",
"source": "github",
"line_count": 616,
"max_line_length": 79,
"avg_line_length": 31.32792207792208,
"alnum_prop": 0.5401077831899679,
"repo_name": "knightXun/BabyCare",
"id": "bcef5ebcc1ed68a7dba9e590f4921bd5eb6d94dd",
"size": "19594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/cpuload/netlink/ipvs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "125863"
}
],
"symlink_target": ""
} |
from __future__ import division
"""
Computes the correction for surface energy error associated with vacancy
formation energy by deducing the surface area of vacancy from the
first principles calculation of vacancies with lda, pbe and pw91
functionals.
The unit area correction computed is defined in Phys.Rev.B 73, 195123, 2006.
The surface area computation is defined in Phys.Rev.B 85, 144118, 2012.
"""
__author__ = "Bharat Medasani"
import math
from numpy import array, linalg, ones
from pymatgen.symmetry.analyzer import SymmetryAnalyzer
from pymatgen.io.vaspio_set import MPGGAVaspInputSet
from pymargen.io.vaspio.vasp_input import Potcar
# Conversion factor: erg/cm^2 -> eV/Angstrom^2 (as the name spells out).
ergpercmsq_to_evperangsq = 6.24150934e-5
# Fit coefficients A and B per functional, used by unit_xc_correction
# (see the reference in the module docstring).
se_corr = {
    "LDA":{"A":448.454, "B":-55.845},
    "PW91":{"A":1577.2, "B":-231.29},
    "PBE":{"A":1193.7, "B":-174.37}}
# Bohr radius expressed in Angstrom.
Bohr_rad = 5.2917721092e-1
def unit_xc_correction(atoms, valence, volume, functional):
    """Compute the unit-area surface-energy correction for one functional.

    Args:
        atoms: number of atoms in the lattice
        valence: valence electron count of the atoms
        volume: volume of the lattice
        functional: one of "PBE", "PW91", "LDA" (the keys of se_corr)
    Returns:
        Correction in eV/A^2
    """
    # Electron-gas radius (in Bohr) from the bulk valence-electron density:
    # rs = (3 / (4*pi*n))**(1/3).
    electron_density = valence * atoms / volume
    rs = (3 / (4 * math.pi * electron_density)) ** (1 / 3.0) / Bohr_rad
    coeffs = se_corr[functional]
    raw = coeffs['A'] * rs ** (-5.0 / 2) + coeffs['B'] * rs ** (-3.0 / 2)
    return raw * ergpercmsq_to_evperangsq
def correction(energy_dict, lattice_dict, valence_dict=None):
    """Compute the surface-energy correction for a neutral vacancy in a metal.

    Args:
        energy_dict: uncorrected vacancy formation energy per functional
            (keys "LDA", "PBE", "PW91")
        lattice_dict: bulk structure per functional (same keys)
        valence_dict: valence electron count per functional; when omitted
            the values are read from VASP POTCAR files (which must then
            be available)
    Returns:
        dict with the fitted vacancy surface areas and the corrected
        vacancy formation energies, per functional
    """
    functionals = ('LDA', 'PBE', 'PW91')
    # Conventional standard cell per functional -> atomic volume and the
    # nominal vacancy surface area volume**(2/3) (Phys.Rev.B 85, 144118).
    uc_structs = {}
    volumes = {}
    surfareas = {}
    for xc in functionals:
        uc_structs[xc] = SymmetryAnalyzer(
            lattice_dict[xc]).get_conventional_standard_structure()
        volumes[xc] = uc_structs[xc].volume / uc_structs[xc].num_sites
        surfareas[xc] = volumes[xc] ** (2.0 / 3)
    if valence_dict:
        valences = {xc: valence_dict[xc] for xc in functionals}
    else:
        try:
            # BUG FIX: was "MPGGAVaspINputSet" (typo) — the resulting
            # NameError was swallowed by the bare "except:" below, so this
            # branch always raised ValueError.  Use the imported name.
            mpvis = MPGGAVaspInputSet()
        except Exception:
            raise ValueError('POTCAR not found. Supply valence of element')
        # NOTE(review): the module imports Potcar from "pymargen.io...",
        # which looks like a typo for "pymatgen" — this branch cannot work
        # until that import is fixed.
        potcar = mpvis.get_potcar(uc_structs['PBE'])
        valences = {'PBE': potcar[0].zval}
        potcar_dict = potcar.to_dict
        for xc in ('LDA', 'PW91'):
            potcar_dict.update({'functional': xc})
            valences[xc] = Potcar.from_dict(potcar_dict)[0].zval
    # BUG FIX: the original called correction(1, valence, volume, xc) here,
    # a recursive call with the wrong arity (this function takes at most 3
    # arguments).  The intended callee is unit_xc_correction.
    xc_cors = {xc: unit_xc_correction(1, valences[xc], volumes[xc], xc)
               for xc in functionals}
    # Least-squares fit of E0 = w0 * (correction * area) + w1 across the
    # three functionals, in the fixed order LDA, PBE, PW91.
    x = [xc_cors[xc] * surfareas[xc] for xc in functionals]
    A = array([x, ones(3)])
    y = [energy_dict[xc] for xc in functionals]
    w = linalg.lstsq(A.T, y)[0]
    return {
        'vac_surface_area': {
            xc: w[0] * surfareas[xc] for xc in functionals
        },
        'vacancy_form_energy_corrected': {
            xc: energy_dict[xc] - w[0] * surfareas[xc] * xc_cors[xc]
            for xc in functionals
        },
    }
| {
"content_hash": "1c31a11f0caf5d773b579e4884789025",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 78,
"avg_line_length": 34.42424242424242,
"alnum_prop": 0.6234595070422535,
"repo_name": "mbkumar/pycdcd",
"id": "0606139f4400713fd8b33ade82622fc3c87b7e23",
"size": "4544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corrections/surface_correction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6864"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import WatchmanTestCase
import json
import tempfile
import os
import os.path
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
@unittest.skipIf(sys.platform != 'darwin', 'N/A unless macOS')
class TestFSEventsResync(WatchmanTestCase.WatchmanTestCase):
    """Exercises watchman's fsevents resync handling (macOS only)."""

    def test_resync(self):
        root = self.mkdtemp()

        # Opt this watch in to fsevents resync handling.
        config_path = os.path.join(root, '.watchmanconfig')
        with open(config_path, 'w') as f:
            f.write(json.dumps({
                'fsevents_try_resync': True
            }))
        self.watchmanCommand('watch', root)

        self.touchRelative(root, '111')
        self.assertFileList(root, ['.watchmanconfig', '111'])
        fresh_res = self.watchmanCommand('query', root, {
            'fields': ['name']})
        self.assertTrue(fresh_res['is_fresh_instance'])
        clock = fresh_res['clock']

        # Force a simulated fsevents drop.
        dropinfo = self.watchmanCommand('debug-fsevents-inject-drop', root)
        self.assertTrue('last_good' in dropinfo, dropinfo)

        # We expect to see the results of these two filesystem operations
        # on our next query, and not see evidence of a recrawl
        os.unlink(os.path.join(root, '111'))
        self.touchRelative(root, '222')
        since_res = self.watchmanCommand('query', root, {
            'since': clock,
            'expression': ['exists'],
            'fields': ['name']})
        self.assertFalse(since_res['is_fresh_instance'], since_res)
        self.assertTrue('warning' not in since_res, since_res)
        self.assertEqual(self.normWatchmanFileList(since_res['files']),
                         self.normFileList(['222']))
| {
"content_hash": "bded0a55fc59c8ed0abb50df36ebe398",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 75,
"avg_line_length": 31.943396226415093,
"alnum_prop": 0.6237448316597756,
"repo_name": "dhruvsinghal/watchman",
"id": "701b8d8d66085df174645ba205b7c4a1b73e26cc",
"size": "1803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_fsevents_resync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "725"
},
{
"name": "C",
"bytes": "733960"
},
{
"name": "CSS",
"bytes": "40936"
},
{
"name": "HTML",
"bytes": "37547"
},
{
"name": "Java",
"bytes": "152559"
},
{
"name": "JavaScript",
"bytes": "38082"
},
{
"name": "M4",
"bytes": "9662"
},
{
"name": "Makefile",
"bytes": "12645"
},
{
"name": "PHP",
"bytes": "152384"
},
{
"name": "Python",
"bytes": "172353"
},
{
"name": "Ruby",
"bytes": "16447"
},
{
"name": "Shell",
"bytes": "3566"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 0112_add_start_end_dates
Revises: 0111_drop_old_service_flags
Create Date: 2017-07-12 13:35:45.636618
"""
from datetime import datetime
import sqlalchemy as sa
from alembic import op
from app.dao.date_util import get_month_start_and_end_date_in_utc
# Alembic revision chain identifiers for this migration.
down_revision = "0111_drop_old_service_flags"
revision = "0112_add_start_end_dates"
def upgrade():
    """Replace the (month, year) columns with explicit start/end datetimes.

    Existing rows are backfilled by parsing the stored month name and year
    into a UTC date range before the new columns are made NOT NULL and the
    unique index is rebuilt on start_date.
    """
    op.drop_index("uix_monthly_billing", "monthly_billing")
    # month/year become nullable: superseded by start_date/end_date.
    op.alter_column("monthly_billing", "month", nullable=True)
    op.alter_column("monthly_billing", "year", nullable=True)
    op.add_column("monthly_billing", sa.Column("start_date", sa.DateTime))
    op.add_column("monthly_billing", sa.Column("end_date", sa.DateTime))
    conn = op.get_bind()
    results = conn.execute("Select id, month, year from monthly_billing")
    res = results.fetchall()
    for x in res:
        # x.month holds a full month name (strptime "%B" parses it).
        start_date, end_date = get_month_start_and_end_date_in_utc(
            datetime(int(x.year), datetime.strptime(x.month, "%B").month, 1)
        )
        # Interpolated values are DB-derived dates/ids, not user input.
        conn.execute(
            "update monthly_billing set start_date = '{}', end_date = '{}' where id = '{}'".format(
                start_date, end_date, x.id
            )
        )
    op.alter_column("monthly_billing", "start_date", nullable=False)
    op.alter_column("monthly_billing", "end_date", nullable=False)
    op.create_index(
        op.f("uix_monthly_billing"), "monthly_billing", ["service_id", "start_date", "notification_type"], unique=True
    )
def downgrade():
    """Drop the date columns and restore the month/year unique index."""
    op.drop_column("monthly_billing", "start_date")
    op.drop_column("monthly_billing", "end_date")
    # Rebuild the original unique index keyed on month/year.
    op.create_index(
        op.f("uix_monthly_billing"),
        "monthly_billing",
        ["service_id", "month", "year", "notification_type"],
        unique=True,
    )
| {
"content_hash": "1b9f26f92f369638ec1ee7241ffbdd29",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 118,
"avg_line_length": 33.698113207547166,
"alnum_prop": 0.641097424412094,
"repo_name": "alphagov/notifications-api",
"id": "8ae738c88aaf83ec0833c588e1453d955ad350c6",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "migrations/versions/0112_add_start_end_dates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "719"
},
{
"name": "Jinja",
"bytes": "5543"
},
{
"name": "Makefile",
"bytes": "6627"
},
{
"name": "Mako",
"bytes": "361"
},
{
"name": "Procfile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "3506225"
},
{
"name": "Shell",
"bytes": "13179"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
class SurveyorError(Exception):
    """Root of the surveyor exception hierarchy."""
    pass
class XMLParseError(SurveyorError, ValueError):
    """Raised for malformed or unexpected XML input; also a ValueError."""
    pass
class UnexpectedTagError(XMLParseError):
    """Raised when a tag other than the expected one is encountered."""

    def __init__(self, incoming, expected):
        super(UnexpectedTagError, self).__init__(
            "Expected tag is {0}, but got {1}".format(expected, incoming))
class CannotImportClassError(XMLParseError):
    """Raised when a dotted classpath cannot be imported."""

    def __init__(self, classpath, exception):
        super(CannotImportClassError, self).__init__(
            "Cannot import '{0}': {1}".format(classpath, exception))
class CannotFindElementClass(XMLParseError):
    """Raised when the named class is missing from an imported module."""

    def __init__(self, class_module, class_name):
        super(CannotFindElementClass, self).__init__(
            "Cannot find '{0}' in module '{1.__name__}' ({1.__file__})".format(
                class_name, class_module))
| {
"content_hash": "cec69bc90e2a4075d38e9bb616ece8f4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 110,
"avg_line_length": 28.966666666666665,
"alnum_prop": 0.6777905638665133,
"repo_name": "9seconds/surveyor",
"id": "fd18a18bfb94a337b78e70f21d40ea0e6a5a6c93",
"size": "895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "surveyor/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "699"
},
{
"name": "Python",
"bytes": "35606"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import os
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
# ``_MethodDefault`` is a private api_core name that is absent from some
# older releases; evaluating the attribute raises AttributeError there, in
# which case a plain ``object`` stands in as the sentinel type so the
# ``OptionalRetry`` alias still resolves.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.cloud.bare_metal_solution_v2.services.bare_metal_solution import pagers
from google.cloud.bare_metal_solution_v2.types import nfs_share as gcb_nfs_share
from google.cloud.bare_metal_solution_v2.types import baremetalsolution
from google.cloud.bare_metal_solution_v2.types import instance
from google.cloud.bare_metal_solution_v2.types import instance as gcb_instance
from google.cloud.bare_metal_solution_v2.types import lun
from google.cloud.bare_metal_solution_v2.types import network
from google.cloud.bare_metal_solution_v2.types import network as gcb_network
from google.cloud.bare_metal_solution_v2.types import nfs_share
from google.cloud.bare_metal_solution_v2.types import volume
from google.cloud.bare_metal_solution_v2.types import volume as gcb_volume
from .transports.base import DEFAULT_CLIENT_INFO, BareMetalSolutionTransport
from .transports.grpc import BareMetalSolutionGrpcTransport
from .transports.grpc_asyncio import BareMetalSolutionGrpcAsyncIOTransport
class BareMetalSolutionClientMeta(type):
    """Metaclass for the BareMetalSolution client.

    Holds class-level support machinery — notably the transport registry —
    so that client instances stay uncluttered.
    """

    _transport_registry: Dict[str, Type[BareMetalSolutionTransport]] = OrderedDict()
    _transport_registry["grpc"] = BareMetalSolutionGrpcTransport
    _transport_registry["grpc_asyncio"] = BareMetalSolutionGrpcAsyncIOTransport

    def get_transport_class(
        cls,
        label: Optional[str] = None,
    ) -> Type[BareMetalSolutionTransport]:
        """Return the transport class registered under ``label``.

        When no label is given, the first transport in the registry
        (its default) is returned instead.
        """
        if not label:
            # No explicit choice: fall back to the first registered entry.
            return next(iter(cls._transport_registry.values()))
        return cls._transport_registry[label]
class BareMetalSolutionClient(metaclass=BareMetalSolutionClientMeta):
"""Performs management operations on Bare Metal Solution servers.
The ``baremetalsolution.googleapis.com`` service provides management
capabilities for Bare Metal Solution servers. To access the API
methods, you must assign Bare Metal Solution IAM roles containing
the desired permissions to your staff in your Google Cloud project.
You must also enable the Bare Metal Solution API. Once enabled, the
methods act upon specific servers in your Bare Metal Solution
environment.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "baremetalsolution.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BareMetalSolutionClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BareMetalSolutionClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> BareMetalSolutionTransport:
        """Returns the transport used by the client instance.

        Returns:
            BareMetalSolutionTransport: The transport used by the client
            instance.
        """
        # ``_transport`` is set in ``__init__`` — either the instance the
        # caller supplied or one built from the registered transport class.
        return self._transport
@staticmethod
def instance_path(
project: str,
location: str,
instance: str,
) -> str:
"""Returns a fully-qualified instance string."""
return "projects/{project}/locations/{location}/instances/{instance}".format(
project=project,
location=location,
instance=instance,
)
@staticmethod
def parse_instance_path(path: str) -> Dict[str, str]:
"""Parses a instance path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/instances/(?P<instance>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def lun_path(
project: str,
location: str,
volume: str,
lun: str,
) -> str:
"""Returns a fully-qualified lun string."""
return "projects/{project}/locations/{location}/volumes/{volume}/luns/{lun}".format(
project=project,
location=location,
volume=volume,
lun=lun,
)
@staticmethod
def parse_lun_path(path: str) -> Dict[str, str]:
"""Parses a lun path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/volumes/(?P<volume>.+?)/luns/(?P<lun>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def network_path(
project: str,
location: str,
network: str,
) -> str:
"""Returns a fully-qualified network string."""
return "projects/{project}/locations/{location}/networks/{network}".format(
project=project,
location=location,
network=network,
)
@staticmethod
def parse_network_path(path: str) -> Dict[str, str]:
"""Parses a network path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/networks/(?P<network>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def nfs_share_path(
project: str,
location: str,
nfs_share: str,
) -> str:
"""Returns a fully-qualified nfs_share string."""
return "projects/{project}/locations/{location}/nfsShares/{nfs_share}".format(
project=project,
location=location,
nfs_share=nfs_share,
)
@staticmethod
def parse_nfs_share_path(path: str) -> Dict[str, str]:
"""Parses a nfs_share path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/nfsShares/(?P<nfs_share>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def server_network_template_path(
project: str,
location: str,
server_network_template: str,
) -> str:
"""Returns a fully-qualified server_network_template string."""
return "projects/{project}/locations/{location}/serverNetworkTemplate/{server_network_template}".format(
project=project,
location=location,
server_network_template=server_network_template,
)
@staticmethod
def parse_server_network_template_path(path: str) -> Dict[str, str]:
"""Parses a server_network_template path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/serverNetworkTemplate/(?P<server_network_template>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def volume_path(
project: str,
location: str,
volume: str,
) -> str:
"""Returns a fully-qualified volume string."""
return "projects/{project}/locations/{location}/volumes/{volume}".format(
project=project,
location=location,
volume=volume,
)
@staticmethod
def parse_volume_path(path: str) -> Dict[str, str]:
"""Parses a volume path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/volumes/(?P<volume>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Optional[Union[str, BareMetalSolutionTransport]] = None,
client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the bare metal solution client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, BareMetalSolutionTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, BareMetalSolutionTransport):
# transport is a BareMetalSolutionTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
api_audience=client_options.api_audience,
)
def list_instances(
self,
request: Optional[Union[instance.ListInstancesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListInstancesPager:
r"""List servers in a given project and location.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_list_instances():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.ListInstancesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_instances(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.ListInstancesRequest, dict]):
The request object. Message for requesting the list of
servers.
parent (str):
Required. Parent value for
ListInstancesRequest.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.services.bare_metal_solution.pagers.ListInstancesPager:
Response message for the list of
servers.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a instance.ListInstancesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, instance.ListInstancesRequest):
request = instance.ListInstancesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_instances]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListInstancesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_instance(
self,
request: Optional[Union[instance.GetInstanceRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.Instance:
r"""Get details about a single server.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_get_instance():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.GetInstanceRequest(
name="name_value",
)
# Make the request
response = client.get_instance(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.GetInstanceRequest, dict]):
The request object. Message for requesting server
information.
name (str):
Required. Name of the resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.types.Instance:
A server.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a instance.GetInstanceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, instance.GetInstanceRequest):
request = instance.GetInstanceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_instance]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_instance(
self,
request: Optional[Union[gcb_instance.UpdateInstanceRequest, dict]] = None,
*,
instance: Optional[gcb_instance.Instance] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Update details of a single server.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_update_instance():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.UpdateInstanceRequest(
)
# Make the request
operation = client.update_instance(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.UpdateInstanceRequest, dict]):
The request object. Message requesting to updating a
server.
instance (google.cloud.bare_metal_solution_v2.types.Instance):
Required. The server to update.
The ``name`` field is used to identify the instance to
update. Format:
projects/{project}/locations/{location}/instances/{instance}
This corresponds to the ``instance`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The list of fields to update. The currently supported
fields are: ``labels`` ``hyperthreading_enabled``
``os_image``
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.bare_metal_solution_v2.types.Instance`
A server.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([instance, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcb_instance.UpdateInstanceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcb_instance.UpdateInstanceRequest):
request = gcb_instance.UpdateInstanceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if instance is not None:
request.instance = instance
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_instance]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("instance.name", request.instance.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcb_instance.Instance,
metadata_type=baremetalsolution.OperationMetadata,
)
# Done; return the response.
return response
def reset_instance(
self,
request: Optional[Union[instance.ResetInstanceRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Perform an ungraceful, hard reset on a server.
Equivalent to shutting the power off and then turning it
back on.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_reset_instance():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.ResetInstanceRequest(
name="name_value",
)
# Make the request
operation = client.reset_instance(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.ResetInstanceRequest, dict]):
The request object. Message requesting to reset a
server.
name (str):
Required. Name of the resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.bare_metal_solution_v2.types.ResetInstanceResponse`
Response message from resetting a server.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a instance.ResetInstanceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, instance.ResetInstanceRequest):
request = instance.ResetInstanceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.reset_instance]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
baremetalsolution.ResetInstanceResponse,
metadata_type=baremetalsolution.OperationMetadata,
)
# Done; return the response.
return response
def start_instance(
    self,
    request: Optional[Union[instance.StartInstanceRequest, dict]] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Starts a server that was shutdown.

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.StartInstanceRequest, dict]):
            The request object. Message requesting to start a server.
        name (str):
            Required. Name of the resource. This corresponds to the
            ``name`` field on the ``request`` instance; if ``request``
            is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation whose result
            will be a
            :class:`google.cloud.bare_metal_solution_v2.types.StartInstanceResponse`.

    Raises:
        ValueError: If ``request`` is given together with any flattened
            field argument.
    """
    # Flattened field arguments are mutually exclusive with a full
    # `request` object.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the proto-plus request type; an
    # existing request object is used as-is, avoiding a copy.
    if not isinstance(request, instance.StartInstanceRequest):
        request = instance.StartInstanceRequest(request)
    # Apply flattened keyword arguments onto the request.
    if name is not None:
        request.name = name
    # The transport-level wrapper carries retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.start_instance]
    # The server routes on the resource name, so it must also travel in
    # the metadata header.
    routing = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    metadata = tuple(metadata) + (routing,)
    # Issue the RPC and wrap the raw operation in an LRO future.
    raw_response = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        instance.StartInstanceResponse,
        metadata_type=baremetalsolution.OperationMetadata,
    )
def stop_instance(
    self,
    request: Optional[Union[instance.StopInstanceRequest, dict]] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Stop a running server.

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.StopInstanceRequest, dict]):
            The request object. Message requesting to stop a server.
        name (str):
            Required. Name of the resource. This corresponds to the
            ``name`` field on the ``request`` instance; if ``request``
            is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation whose result
            will be a
            :class:`google.cloud.bare_metal_solution_v2.types.StopInstanceResponse`.

    Raises:
        ValueError: If ``request`` is given together with any flattened
            field argument.
    """
    # Flattened field arguments are mutually exclusive with a full
    # `request` object.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the proto-plus request type; an
    # existing request object is used as-is, avoiding a copy.
    if not isinstance(request, instance.StopInstanceRequest):
        request = instance.StopInstanceRequest(request)
    # Apply flattened keyword arguments onto the request.
    if name is not None:
        request.name = name
    # The transport-level wrapper carries retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.stop_instance]
    # The server routes on the resource name, so it must also travel in
    # the metadata header.
    routing = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    metadata = tuple(metadata) + (routing,)
    # Issue the RPC and wrap the raw operation in an LRO future.
    raw_response = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        instance.StopInstanceResponse,
        metadata_type=baremetalsolution.OperationMetadata,
    )
def detach_lun(
    self,
    request: Optional[Union[gcb_instance.DetachLunRequest, dict]] = None,
    *,
    instance: Optional[str] = None,
    lun: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Detach LUN from Instance.

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.DetachLunRequest, dict]):
            The request object. Message for detach specific LUN from an
            Instance.
        instance (str):
            Required. Name of the instance. This corresponds to the
            ``instance`` field on the ``request`` instance; if
            ``request`` is provided, this should not be set.
        lun (str):
            Required. Name of the Lun to detach. This corresponds to the
            ``lun`` field on the ``request`` instance; if ``request`` is
            provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation whose result
            will be a
            :class:`google.cloud.bare_metal_solution_v2.types.Instance`
            (a server).

    Raises:
        ValueError: If ``request`` is given together with any flattened
            field argument.
    """
    # Flattened field arguments are mutually exclusive with a full
    # `request` object.
    if request is not None and any([instance, lun]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the proto-plus request type; an
    # existing request object is used as-is, avoiding a copy.
    if not isinstance(request, gcb_instance.DetachLunRequest):
        request = gcb_instance.DetachLunRequest(request)
    # Apply flattened keyword arguments onto the request.
    if instance is not None:
        request.instance = instance
    if lun is not None:
        request.lun = lun
    # The transport-level wrapper carries retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.detach_lun]
    # The server routes on the instance name, so it must also travel in
    # the metadata header.
    routing = gapic_v1.routing_header.to_grpc_metadata(
        (("instance", request.instance),)
    )
    metadata = tuple(metadata) + (routing,)
    # Issue the RPC and wrap the raw operation in an LRO future.
    raw_response = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        gcb_instance.Instance,
        metadata_type=baremetalsolution.OperationMetadata,
    )
def list_volumes(
    self,
    request: Optional[Union[volume.ListVolumesRequest, dict]] = None,
    *,
    parent: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListVolumesPager:
    r"""List storage volumes in a given project and location.

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.ListVolumesRequest, dict]):
            The request object. Message for requesting a list of storage
            volumes.
        parent (str):
            Required. Parent value for ListVolumesRequest. This
            corresponds to the ``parent`` field on the ``request``
            instance; if ``request`` is provided, this should not be
            set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.bare_metal_solution_v2.services.bare_metal_solution.pagers.ListVolumesPager:
            Response message containing the list of storage volumes.
            Iterating over this object yields results and resolves
            additional pages automatically.

    Raises:
        ValueError: If ``request`` is given together with any flattened
            field argument.
    """
    # Flattened field arguments are mutually exclusive with a full
    # `request` object.
    if request is not None and any([parent]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the proto-plus request type; an
    # existing request object is used as-is, avoiding a copy.
    if not isinstance(request, volume.ListVolumesRequest):
        request = volume.ListVolumesRequest(request)
    # Apply flattened keyword arguments onto the request.
    if parent is not None:
        request.parent = parent
    # The transport-level wrapper carries retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.list_volumes]
    # The server routes on the parent resource, so it must also travel
    # in the metadata header.
    routing = gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),))
    metadata = tuple(metadata) + (routing,)
    # Issue the RPC for the first page.
    first_page = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    # Wrap in a pager so callers can iterate across pages transparently.
    return pagers.ListVolumesPager(
        method=wrapped_rpc,
        request=request,
        response=first_page,
        metadata=metadata,
    )
def get_volume(
    self,
    request: Optional[Union[volume.GetVolumeRequest, dict]] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> volume.Volume:
    r"""Get details of a single storage volume.

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.GetVolumeRequest, dict]):
            The request object. Message for requesting storage volume
            information.
        name (str):
            Required. Name of the resource. This corresponds to the
            ``name`` field on the ``request`` instance; if ``request``
            is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.bare_metal_solution_v2.types.Volume:
            A storage volume.

    Raises:
        ValueError: If ``request`` is given together with any flattened
            field argument.
    """
    # Flattened field arguments are mutually exclusive with a full
    # `request` object.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the proto-plus request type; an
    # existing request object is used as-is, avoiding a copy.
    if not isinstance(request, volume.GetVolumeRequest):
        request = volume.GetVolumeRequest(request)
    # Apply flattened keyword arguments onto the request.
    if name is not None:
        request.name = name
    # The transport-level wrapper carries retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.get_volume]
    # The server routes on the resource name, so it must also travel in
    # the metadata header.
    routing = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    metadata = tuple(metadata) + (routing,)
    # Unary call: return the response directly.
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def update_volume(
    self,
    request: Optional[Union[gcb_volume.UpdateVolumeRequest, dict]] = None,
    *,
    volume: Optional[gcb_volume.Volume] = None,
    update_mask: Optional[field_mask_pb2.FieldMask] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Update details of a single storage volume.

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.UpdateVolumeRequest, dict]):
            The request object. Message for updating a volume.
        volume (google.cloud.bare_metal_solution_v2.types.Volume):
            Required. The volume to update. The ``name`` field
            identifies the volume to update. Format:
            projects/{project}/locations/{location}/volumes/{volume}.
            This corresponds to the ``volume`` field on the ``request``
            instance; if ``request`` is provided, this should not be
            set.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            The list of fields to update. The only currently supported
            fields are: ``snapshot_auto_delete_behavior``
            ``snapshot_schedule_policy_name`` 'labels'
            'snapshot_enabled'
            'snapshot_reservation_detail.reserved_space_percent'.
            This corresponds to the ``update_mask`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation whose result
            will be a
            :class:`google.cloud.bare_metal_solution_v2.types.Volume`
            (a storage volume).

    Raises:
        ValueError: If ``request`` is given together with any flattened
            field argument.
    """
    # Flattened field arguments are mutually exclusive with a full
    # `request` object.
    if request is not None and any([volume, update_mask]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the proto-plus request type; an
    # existing request object is used as-is, avoiding a copy.
    if not isinstance(request, gcb_volume.UpdateVolumeRequest):
        request = gcb_volume.UpdateVolumeRequest(request)
    # Apply flattened keyword arguments onto the request.
    if volume is not None:
        request.volume = volume
    if update_mask is not None:
        request.update_mask = update_mask
    # The transport-level wrapper carries retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.update_volume]
    # The server routes on the volume name, so it must also travel in
    # the metadata header.
    routing = gapic_v1.routing_header.to_grpc_metadata(
        (("volume.name", request.volume.name),)
    )
    metadata = tuple(metadata) + (routing,)
    # Issue the RPC and wrap the raw operation in an LRO future.
    raw_response = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        gcb_volume.Volume,
        metadata_type=baremetalsolution.OperationMetadata,
    )
def resize_volume(
    self,
    request: Optional[Union[gcb_volume.ResizeVolumeRequest, dict]] = None,
    *,
    volume: Optional[str] = None,
    size_gib: Optional[int] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Emergency Volume resize.

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.ResizeVolumeRequest, dict]):
            The request object. Request for emergency resize Volume.
        volume (str):
            Required. Volume to resize. This corresponds to the
            ``volume`` field on the ``request`` instance; if ``request``
            is provided, this should not be set.
        size_gib (int):
            New Volume size, in GiB. This corresponds to the
            ``size_gib`` field on the ``request`` instance; if
            ``request`` is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation whose result
            will be a
            :class:`google.cloud.bare_metal_solution_v2.types.Volume`
            (a storage volume).

    Raises:
        ValueError: If ``request`` is given together with any flattened
            field argument.
    """
    # Flattened field arguments are mutually exclusive with a full
    # `request` object.
    if request is not None and any([volume, size_gib]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the proto-plus request type; an
    # existing request object is used as-is, avoiding a copy.
    if not isinstance(request, gcb_volume.ResizeVolumeRequest):
        request = gcb_volume.ResizeVolumeRequest(request)
    # Apply flattened keyword arguments onto the request.
    if volume is not None:
        request.volume = volume
    if size_gib is not None:
        request.size_gib = size_gib
    # The transport-level wrapper carries retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.resize_volume]
    # The server routes on the volume name, so it must also travel in
    # the metadata header.
    routing = gapic_v1.routing_header.to_grpc_metadata((("volume", request.volume),))
    metadata = tuple(metadata) + (routing,)
    # Issue the RPC and wrap the raw operation in an LRO future.
    raw_response = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        gcb_volume.Volume,
        metadata_type=baremetalsolution.OperationMetadata,
    )
def list_networks(
    self,
    request: Optional[Union[network.ListNetworksRequest, dict]] = None,
    *,
    parent: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListNetworksPager:
    r"""List network in a given project and location.

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.ListNetworksRequest, dict]):
            The request object. Message for requesting a list of
            networks.
        parent (str):
            Required. Parent value for ListNetworksRequest. This
            corresponds to the ``parent`` field on the ``request``
            instance; if ``request`` is provided, this should not be
            set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.bare_metal_solution_v2.services.bare_metal_solution.pagers.ListNetworksPager:
            Response message containing the list of networks. Iterating
            over this object yields results and resolves additional
            pages automatically.

    Raises:
        ValueError: If ``request`` is given together with any flattened
            field argument.
    """
    # Flattened field arguments are mutually exclusive with a full
    # `request` object.
    if request is not None and any([parent]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the proto-plus request type; an
    # existing request object is used as-is, avoiding a copy.
    if not isinstance(request, network.ListNetworksRequest):
        request = network.ListNetworksRequest(request)
    # Apply flattened keyword arguments onto the request.
    if parent is not None:
        request.parent = parent
    # The transport-level wrapper carries retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.list_networks]
    # The server routes on the parent resource, so it must also travel
    # in the metadata header.
    routing = gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),))
    metadata = tuple(metadata) + (routing,)
    # Issue the RPC for the first page.
    first_page = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    # Wrap in a pager so callers can iterate across pages transparently.
    return pagers.ListNetworksPager(
        method=wrapped_rpc,
        request=request,
        response=first_page,
        metadata=metadata,
    )
def list_network_usage(
    self,
    request: Optional[Union[network.ListNetworkUsageRequest, dict]] = None,
    *,
    location: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> network.ListNetworkUsageResponse:
    r"""List all Networks (and used IPs for each Network) in the vendor
    account associated with the specified project.

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.ListNetworkUsageRequest, dict]):
            The request object. Request to get networks with IPs.
        location (str):
            Required. Parent value (project and location). This
            corresponds to the ``location`` field on the ``request``
            instance; if ``request`` is provided, this should not be
            set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.bare_metal_solution_v2.types.ListNetworkUsageResponse:
            Response with Networks with IPs.

    Raises:
        ValueError: If ``request`` is given together with any flattened
            field argument.
    """
    # Flattened field arguments are mutually exclusive with a full
    # `request` object.
    if request is not None and any([location]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into the proto-plus request type; an
    # existing request object is used as-is, avoiding a copy.
    if not isinstance(request, network.ListNetworkUsageRequest):
        request = network.ListNetworkUsageRequest(request)
    # Apply flattened keyword arguments onto the request.
    if location is not None:
        request.location = location
    # The transport-level wrapper carries retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.list_network_usage]
    # The server routes on the location, so it must also travel in the
    # metadata header.
    routing = gapic_v1.routing_header.to_grpc_metadata(
        (("location", request.location),)
    )
    metadata = tuple(metadata) + (routing,)
    # Unary call: return the response directly.
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def get_network(
self,
request: Optional[Union[network.GetNetworkRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> network.Network:
r"""Get details of a single network.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_get_network():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.GetNetworkRequest(
name="name_value",
)
# Make the request
response = client.get_network(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.GetNetworkRequest, dict]):
The request object. Message for requesting network
information.
name (str):
Required. Name of the resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.types.Network:
A Network.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a network.GetNetworkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, network.GetNetworkRequest):
request = network.GetNetworkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_network]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_network(
self,
request: Optional[Union[gcb_network.UpdateNetworkRequest, dict]] = None,
*,
network: Optional[gcb_network.Network] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Update details of a single network.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_update_network():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.UpdateNetworkRequest(
)
# Make the request
operation = client.update_network(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.UpdateNetworkRequest, dict]):
The request object. Message requesting to updating a
network.
network (google.cloud.bare_metal_solution_v2.types.Network):
Required. The network to update.
The ``name`` field is used to identify the instance to
update. Format:
projects/{project}/locations/{location}/networks/{network}
This corresponds to the ``network`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The list of fields to update. The only currently
supported fields are: ``labels``, ``reservations``
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.bare_metal_solution_v2.types.Network`
A Network.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([network, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcb_network.UpdateNetworkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcb_network.UpdateNetworkRequest):
request = gcb_network.UpdateNetworkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if network is not None:
request.network = network
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_network]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("network.name", request.network.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcb_network.Network,
metadata_type=baremetalsolution.OperationMetadata,
)
# Done; return the response.
return response
def get_lun(
self,
request: Optional[Union[lun.GetLunRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> lun.Lun:
r"""Get details of a single storage logical unit
number(LUN).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_get_lun():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.GetLunRequest(
name="name_value",
)
# Make the request
response = client.get_lun(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.GetLunRequest, dict]):
The request object. Message for requesting storage lun
information.
name (str):
Required. Name of the resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.types.Lun:
A storage volume logical unit number
(LUN).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a lun.GetLunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, lun.GetLunRequest):
request = lun.GetLunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_lun]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_luns(
self,
request: Optional[Union[lun.ListLunsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListLunsPager:
r"""List storage volume luns for given storage volume.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_list_luns():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.ListLunsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_luns(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.ListLunsRequest, dict]):
The request object. Message for requesting a list of
storage volume luns.
parent (str):
Required. Parent value for
ListLunsRequest.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.services.bare_metal_solution.pagers.ListLunsPager:
Response message containing the list
of storage volume luns.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a lun.ListLunsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, lun.ListLunsRequest):
request = lun.ListLunsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_luns]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListLunsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_nfs_share(
self,
request: Optional[Union[nfs_share.GetNfsShareRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> nfs_share.NfsShare:
r"""Get details of a single NFS share.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_get_nfs_share():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.GetNfsShareRequest(
name="name_value",
)
# Make the request
response = client.get_nfs_share(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.GetNfsShareRequest, dict]):
The request object. Message for requesting NFS share
information.
name (str):
Required. Name of the resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.types.NfsShare:
An NFS share.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a nfs_share.GetNfsShareRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, nfs_share.GetNfsShareRequest):
request = nfs_share.GetNfsShareRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_nfs_share]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_nfs_shares(
self,
request: Optional[Union[nfs_share.ListNfsSharesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListNfsSharesPager:
r"""List NFS shares.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_list_nfs_shares():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.ListNfsSharesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_nfs_shares(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.ListNfsSharesRequest, dict]):
The request object. Message for requesting a list of NFS
shares.
parent (str):
Required. Parent value for
ListNfsSharesRequest.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.services.bare_metal_solution.pagers.ListNfsSharesPager:
Response message containing the list
of NFS shares.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a nfs_share.ListNfsSharesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, nfs_share.ListNfsSharesRequest):
request = nfs_share.ListNfsSharesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_nfs_shares]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListNfsSharesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def update_nfs_share(
self,
request: Optional[Union[gcb_nfs_share.UpdateNfsShareRequest, dict]] = None,
*,
nfs_share: Optional[gcb_nfs_share.NfsShare] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Update details of a single NFS share.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bare_metal_solution_v2
def sample_update_nfs_share():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.UpdateNfsShareRequest(
)
# Make the request
operation = client.update_nfs_share(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.UpdateNfsShareRequest, dict]):
The request object. Message requesting to updating a NFS
share.
nfs_share (google.cloud.bare_metal_solution_v2.types.NfsShare):
Required. The NFS share to update.
The ``name`` field is used to identify the NFS share to
update. Format:
projects/{project}/locations/{location}/nfsShares/{nfs_share}
This corresponds to the ``nfs_share`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The list of fields to update. The only currently
supported fields are: ``labels``
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.bare_metal_solution_v2.types.NfsShare`
An NFS share.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([nfs_share, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcb_nfs_share.UpdateNfsShareRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcb_nfs_share.UpdateNfsShareRequest):
request = gcb_nfs_share.UpdateNfsShareRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if nfs_share is not None:
request.nfs_share = nfs_share
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_nfs_share]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("nfs_share.name", request.nfs_share.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcb_nfs_share.NfsShare,
metadata_type=baremetalsolution.OperationMetadata,
)
# Done; return the response.
return response
    def __enter__(self):
        # Support use as a context manager; the paired __exit__ closes the
        # underlying transport.
        return self
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # Exceptions are not suppressed (implicitly returns None).
        self.transport.close()
# Client-library version info reported with each request. NOTE(review):
# pkg_resources is deprecated in modern setuptools; newer generated clients
# use importlib.metadata instead — left as-is here to preserve behavior.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-bare-metal-solution",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package not installed (e.g. running from a source checkout): fall back
    # to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("BareMetalSolutionClient",)
| {
"content_hash": "b6c420283863677b426e0841420f063c",
"timestamp": "",
"source": "github",
"line_count": 2875,
"max_line_length": 126,
"avg_line_length": 40.170086956521736,
"alnum_prop": 0.5913983149910381,
"repo_name": "googleapis/python-bare-metal-solution",
"id": "3fa985a33faddf3b85cf995528f512ad89608e61",
"size": "116089",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/bare_metal_solution_v2/services/bare_metal_solution/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "682571"
},
{
"name": "Shell",
"bytes": "30699"
}
],
"symlink_target": ""
} |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_ANGLE_translated_shader_source'
def _f( function ):
    # Decorator: replace the stub with a lazily-resolved GLES2 extension
    # entry point, with the standard error checker attached.
    return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_ANGLE_translated_shader_source',error_checker=_errors._error_checker)
# Enum constant defined by the ANGLE_translated_shader_source extension.
GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE=_C('GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE',0x93A0)
# The `pass` body is a stub: the @_f/@_p.types decorators replace it with the
# native entry point (return type None, then the four argument types).
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetTranslatedShaderSourceANGLE(shader,bufsize,length,source):pass
| {
"content_hash": "16c9f1cc99193936952b71eb35e91cc9",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 133,
"avg_line_length": 46.35294117647059,
"alnum_prop": 0.7829949238578681,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "6fc1138568d191a06ecb203a8009bb3d68c16711",
"size": "788",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GLES2/ANGLE/translated_shader_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
# Maps lowercased field-type identifiers to the simplified, client-friendly
# datatype names used throughout these settings (see OPERATORS below).
SIMPLE_TYPES = {
    'auto': 'key',
    'foreignkey': 'key',
    'biginteger': 'number',
    'decimal': 'number',
    'float': 'number',
    'integer': 'number',
    'positiveinteger': 'number',
    'positivesmallinteger': 'number',
    'smallinteger': 'number',
    'nullboolean': 'boolean',
    'char': 'string',
    'email': 'string',
    'file': 'string',
    'filepath': 'string',
    'image': 'string',
    'ipaddress': 'string',
    'slug': 'string',
    'text': 'string',
    'url': 'string',
}
# A mapping between the client-friendly datatypes and sensible operators
# that will be used to validate a query condition. In many cases, these types
# support more operators than what are defined, but are not included because
# they are not commonly used. A leading '-' denotes the negated operator.
OPERATORS = {
    'key': ('exact', '-exact', 'in', '-in'),
    'boolean': ('exact', '-exact', 'in', '-in'),
    'date': ('exact', '-exact', 'in', '-in', 'lt', 'lte', 'gt', 'gte',
             'range', '-range'),
    'number': ('exact', '-exact', 'in', '-in', 'lt', 'lte', 'gt', 'gte',
               'range', '-range'),
    'string': ('exact', '-exact', 'iexact', '-iexact', 'in', '-in',
               'icontains', '-icontains', 'iregex', '-iregex'),
    'datetime': ('exact', '-exact', 'in', '-in', 'lt', 'lte', 'gt', 'gte',
                 'range', '-range'),
    'time': ('exact', '-exact', 'in', '-in', 'lt', 'lte', 'gt', 'gte',
             'range', '-range'),
}
# A general mapping of formfield overrides for all subclasses. The mapping is
# similar to the SIMPLE_TYPES map above, but the values reference internal
# formfield classes, that is integer -> IntegerField. In many cases, the
# validation performed may need to be a bit less restrictive than what is
# actually necessary (hence the FloatField overrides for integer types).
INTERNAL_DATATYPE_FORMFIELDS = {
    'integer': 'FloatField',
    'positiveinteger': 'FloatField',
    'positivesmallinteger': 'FloatField',
    'smallinteger': 'FloatField',
    'biginteger': 'FloatField',
}
# The minimum number of distinct values required when determining to set the
# `searchable` flag on `DataField` instances during the `init` process. This
# will only be applied to fields with a Avocado datatype of 'string'
ENUMERABLE_MAXIMUM = 30
# Flag for enabling the history API
HISTORY_ENABLED = True
# The maximum size of a user's history. If the value is an integer, this
# is the maximum number of allowed items in the user's history. Set to
# `None` (or 0) to enable unlimited history. Note, in order to enforce this
# limit, the `avocado history --prune` command must be executed to remove
# the oldest history from each user based on this value.
HISTORY_MAX_SIZE = None
# App that the metadata migrations will be created for. This is typically the
# project itself.
METADATA_MIGRATION_APP = None
# Directory for the migration backup fixtures. If None, this will default to
# the fixtures dir in the app defined by `METADATA_MIGRATION_APP`
METADATA_FIXTURE_DIR = None
# Filename suffixes used for metadata fixtures and their migrations.
METADATA_FIXTURE_SUFFIX = 'avocado_metadata'
METADATA_MIGRATION_SUFFIX = 'avocado_metadata_migration'
# Query processors, keyed by name; values are dotted paths to classes.
QUERY_PROCESSORS = {
    'default': 'avocado.query.pipeline.QueryProcessor',
}
# Custom validation error and warnings messages
VALIDATION_ERRORS = {}
VALIDATION_WARNINGS = {}
# Toggle whether DataField instances should cache the underlying data
# for their most common data access methods.
DATA_CACHE_ENABLED = True
# These settings affect how queries can be shared between users.
# A user is able to enter either a username or an email of another user
# they wish to share the query with. To limit to only one type of sharing
# set the appropriate setting to True and all others to false.
SHARE_BY_USERNAME = True
SHARE_BY_EMAIL = True
SHARE_BY_USERNAME_CASE_SENSITIVE = True
# Toggle whether the permissions system should be enabled.
# If django-guardian is installed and this value is None or True, permissions
# will be applied. If the value is True and django-guardian is not installed
# it is an error. If set to False the permissions will not be applied.
PERMISSIONS_ENABLED = None
# Caches are used to improve performance across various APIs. The two primary
# ones are data and query. Data cache is used for individual data field
# caching such as counts, values, and aggregations. Query cache is used for
# the ad-hoc queries built from a context and view.
DATA_CACHE = 'default'
QUERY_CACHE = 'default'
# Name of the queue to use for scheduling and working on async jobs.
ASYNC_QUEUE = 'avocado'
| {
"content_hash": "b200bf07db739b2c26f4a4a6e5868cd3",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 77,
"avg_line_length": 37.35,
"alnum_prop": 0.6845158411423472,
"repo_name": "murphyke/avocado",
"id": "2a9a4ff85b08be85f0f841415f7ffe00cff3987a",
"size": "4830",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "avocado/conf/global_settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "18009"
},
{
"name": "Makefile",
"bytes": "84"
},
{
"name": "Python",
"bytes": "1035156"
},
{
"name": "R",
"bytes": "273"
},
{
"name": "SAS",
"bytes": "689"
},
{
"name": "Shell",
"bytes": "2369"
}
],
"symlink_target": ""
} |
import json
import requests
import argparse
import os
# Base URL for the patchwork REST API (v1.1) and default user/project.
SERVER_URL = 'https://patchwork.kernel.org/api/1.1'
USERNAME = 'kvalo'
PROJECT = 'linux-wireless'
# Set the patchwork token in the PATCHWORK_TOKEN environment variable; most
# of the commands will work without the token anyway.
def get_auth_headers():
    """Return HTTP headers carrying the patchwork API token, if configured.

    Reads the token from the PATCHWORK_TOKEN environment variable; when it
    is unset, an empty header dict is returned (unauthenticated access).
    """
    token = os.environ.get('PATCHWORK_TOKEN')
    if token is None:
        return {}
    return {'Authorization': 'Token %s' % token}
# state names listed in patchwork/fixtures/default_states.xml
def rest_get(args, url, params=None):
    """GET a patchwork REST resource.

    *url* may be absolute or relative to SERVER_URL. When --dump was
    given, the decoded JSON body is printed. Raises on HTTP error status.
    """
    if not url.startswith(SERVER_URL):
        url = SERVER_URL + url
    response = requests.get(url, headers=get_auth_headers(), params=params)
    if args.dump:
        separator = '----------------------------------------------------------------------'
        print(separator)
        print('%s:' % url)
        print(json.dumps(response.json(), indent=2))
        print(separator)
    response.raise_for_status()
    return response
def rest_patch(args, url, json):
    """PATCH a patchwork REST resource with a JSON request body.

    *url* may be absolute or relative to SERVER_URL. When --dump was
    given, the decoded response is printed. Raises on HTTP error status.
    """
    # BUG FIX: the third parameter is named 'json' and shadows the stdlib
    # json module, so the --dump branch used to crash with
    # AttributeError: 'dict' object has no attribute 'dumps'.
    # Re-import the module under a distinct name; the parameter name itself
    # is kept for backward compatibility with keyword callers.
    import json as json_module
    if not url.startswith(SERVER_URL):
        url = '%s%s' % (SERVER_URL, url)
    headers = get_auth_headers()
    headers['Content-Type'] = 'application/json'
    r = requests.patch(url, json=json, headers=headers)
    if args.dump:
        print('----------------------------------------------------------------------')
        print('%s:' % url)
        print(json_module.dumps(r.json(), indent=2))
        print('----------------------------------------------------------------------')
    r.raise_for_status()
    return r
def cmd_root(args):
    """GET the API root; simple connectivity/auth sanity check."""
    rest_get(args, '/')
def cmd_patch1(args):
    """GET one patch and its comments; demonstrates a unicode print problem."""
    # Formatting: remove leading space
    r = rest_get(args, '/patches/11484825/')
    j = r.json()
    print(type(j['submitter']['name']))
    # Weird, this print causes an exception with --dump:
    #
    # $ ./rest-test-1.py --dump patch1 1>/dev/null
    # Traceback (most recent call last):
    #   File "./rest-test-1.py", line 111, in <module>
    #     main()
    #   File "./rest-test-1.py", line 108, in main
    #     args.func(args)
    #   File "./rest-test-1.py", line 45, in cmd_patch1
    #     print(j['submitter']['name'])
    # UnicodeEncodeError: 'ascii' codec can't encode character u'\xc9' in position 9: ordinal not in range(128)
    #
    # Also like this:
    #
    # $ ./rest-test-1.py patch1 1>/dev/null
    # Traceback (most recent call last):
    #   File "./rest-test-1.py", line 119, in <module>
    #     main()
    #   File "./rest-test-1.py", line 116, in main
    #     args.func(args)
    #   File "./rest-test-1.py", line 53, in cmd_patch1
    #     print(j['submitter']['name'])
    # UnicodeEncodeError: 'ascii' codec can't encode character u'\xc9' in position 9: ordinal not in range(128)
    print(j['submitter']['name'])
    rest_get(args, '/patches/11484825/comments/')
def cmd_patch2(args):
    """GET a patch together with its series, cover letter and cover comments."""
    # rtw88: add support for 802.11n RTL8723DE devices
    r = rest_get(args, '/patches/11494413/')
    j = r.json()
    print(j['submitter']['name'], type(j['submitter']['name']))
    rest_get(args, '/series/272629/')
    rest_get(args, '/covers/11494407/')
    rest_get(args, '/covers/11494407/comments/')
def cmd_auth1(args):
    """Placeholder for authentication tests; intentionally does nothing."""
    pass
def cmd_users1(args):
    """Search users matching the string 'kvalo'."""
    rest_get(args, '/users/?q=kvalo')
def cmd_patches1(args):
    """List the project's patches delegated to USERNAME in new/under-review state."""
    r = rest_get(args, '/patches/', { 'project' : PROJECT,
                                      'delegate' : USERNAME,
                                      'state' : [ 'new', 'under-review' ] })
    j = r.json()
    print('%s patches' % (len(j)))
    for patch in j:
        print(patch['submitter']['name'], patch['name'], patch['state'],
              patch['date'])
def cmd_patches2(args):
    """Walk all pages of deferred patches via the Link-header pagination."""
    r = rest_get(args, '/patches/', { 'project' : PROJECT,
                                      'delegate' : USERNAME,
                                      'state' : 'deferred' })
    while True:
        j = r.json()
        # NOTE: this is the per-page count, not the overall total.
        print('%s patches' % (len(j)))
        if 'next' not in r.links:
            # no more pages
            break
        r = rest_get(args, r.links['next']['url'])
def cmd_events1(args):
    """List events recorded for one specific patch."""
    r = rest_get(args, '/events/', { 'patch': '11484825' } )
    j = r.json()
    for event in j:
        print(event['category'], event['date'])
def cmd_set_state1(args):
    """PATCH a patch's state back to 'new' (modifies server data, needs token)."""
    # [v2] ath10k: add retry mechanism for ath10k_start
    r = rest_patch(args, '/patches/11340881/', { 'state': 'new' })
    j = r.json()
    print(j['name'], j['state'], j['delegate']['username'])
def cmd_set_delegate1(args):
    """PATCH a patch's delegate (modifies server data, needs token)."""
    # kvalo's id is 25621
    r = rest_patch(args, '/patches/11340881/', { 'delegate': 25621 })
    j = r.json()
    print(j['name'], j['state'], j['delegate']['username'])
def cmd_mbox1(args):
    """Download and print a patch's mbox file."""
    # rtw88: add support for 802.11n RTL8723DE devices
    r = rest_get(args, '/patches/11494413/')
    j = r.json()
    print(j['mbox'])
    r = requests.get(j['mbox'], headers=get_auth_headers())
    print(r.text)
def cmd_tags1(args):
    """Print a patch's tags and content."""
    # [v2,06/18] mt76: add mac80211 driver for MT7915 PCIe-based chipsets
    r = rest_get(args, '/patches/11489259/')
    j = r.json()
    print('%s: tags %r' % (j['name'], j['tags']))
    print(j['content'])
def cmd_series1(args):
    """List series for the project with submitter, date and patch count."""
    r = rest_get(args, '/series/', { 'project' : PROJECT })
    j = r.json()
    print('%s series' % (len(j)))
    for series in j:
        print(series['submitter']['name'], series['name'], series['date'],
              '%d patches' % len(series['patches']))
def cmd_series2(args):
    """Resolve a patch to its series and print the series' cover letter."""
    # [PATCHv2,1/4] ath10k: Add wmi command support for station specific TID config
    r = rest_get(args, '/patches/11643791/')
    patch = r.json()
    # note: for some reason series is an array, just use the first one
    # and ignore the rest
    print(patch['series'][0]['id'], patch['series'][0]['name'])
    # fetch the full series
    r = rest_get(args, '/series/%d/' % (patch['series'][0]['id']))
    series = r.json()
    print(series['cover_letter']['id'], series['cover_letter']['web_url'])
def cmd_utf1(args):
    """Exercise UTF-8 handling when downloading and decoding an mbox."""
    # carl9170: remove P2P_GO support
    r = rest_get(args, '/patches/11509803/')
    j = r.json()
    print(j['mbox'])
    r = requests.get(j['mbox'], headers=get_auth_headers())
    print(r.encoding)
    # Force utf-8 decoding regardless of the encoding the server declared.
    r.encoding = 'utf-8'
    text = r.text
    print(type(text))
    # s collects escaped lines; only consumed by the commented-out print below.
    s = []
    for l in text.splitlines():
        s.append(ascii(l))
        if l.startswith('Reported-by:'):
            print(type(l))
            print(l)
            print(ascii(l))
    #print('\n'.join(s))
def cmd_pull1(args):
    """Print the pull_url field of a patch (set for pull-request emails)."""
    r = rest_get(args, '/patches/11507893/')
    j = r.json()
    print(repr(j['pull_url']))
def main():
    """Parse the command line and dispatch to the selected test command."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump', action='store_true')
    subparsers = parser.add_subparsers()
    # One subcommand per test case; each simply binds its handler via
    # set_defaults(func=...), exactly as before, just table-driven.
    handlers = [
        ('root', cmd_root),
        ('patch1', cmd_patch1),
        ('patch2', cmd_patch2),
        ('auth1', cmd_auth1),
        ('users1', cmd_users1),
        ('patches1', cmd_patches1),
        ('patches2', cmd_patches2),
        ('events1', cmd_events1),
        ('set-state1', cmd_set_state1),
        ('set-delegate1', cmd_set_delegate1),
        ('mbox1', cmd_mbox1),
        ('tags1', cmd_tags1),
        ('series1', cmd_series1),
        ('series2', cmd_series2),
        ('utf1', cmd_utf1),
        ('pull1', cmd_pull1),
    ]
    for name, handler in handlers:
        subparsers.add_parser(name).set_defaults(func=handler)
    args = parser.parse_args()
    args.func(args)
if __name__ == "__main__":
    main()
| {
"content_hash": "f6e97dbdd732087f7e9e6dce463002db",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 111,
"avg_line_length": 32.068548387096776,
"alnum_prop": 0.5737457563183704,
"repo_name": "kvalo/pwcli",
"id": "66806bc1450f7779893d66ea20cfe6ffeaa0d20a",
"size": "9527",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "stubs/rest-test-1.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "326413"
},
{
"name": "Shell",
"bytes": "191"
}
],
"symlink_target": ""
} |
import base64
from twisted.internet import reactor, protocol
import os
PORT = 8000  # TCP port the server listens on
import struct  # NOTE(review): struct (and os) appear unused in this file — confirm before removing
def get_bytes_from_file(filename):
    """Return the raw contents of *filename* as a byte string.

    Uses a context manager so the file handle is closed promptly instead
    of leaking until garbage collection (the previous one-liner never
    closed the file explicitly).
    """
    with open(filename, "rb") as f:
        return f.read()
# remove this for distribution of server
KEY = "WoAh_A_Key!?"
def length_encryption_key():
    # Length of the hard-coded key; get_magic_png() requires this to be 12.
    return len(KEY)
def get_magic_png():
    # Read ./sleeping.png and return it base64-encoded. Returns '' instead
    # if the hard-coded KEY no longer has its expected length of 12
    # (a tamper check on the key constant). Python 2 print syntax.
    image = get_bytes_from_file("./sleeping.png")
    encoded_string = base64.b64encode(image)
    key_len = length_encryption_key()
    print 'Sending magic....'
    if key_len != 12:
        return ''
    return encoded_string
class MyServer(protocol.Protocol):
    # On every new TCP connection, immediately push the encoded image
    # payload to the client and do nothing else.
    def connectionMade(self):
        resp = get_magic_png()
        self.transport.write(resp)
class MyServerFactory(protocol.Factory):
    # Factory producing one MyServer protocol instance per connection.
    protocol = MyServer
# Bootstrap: listen on PORT and run the Twisted reactor until interrupted.
factory = MyServerFactory()
reactor.listenTCP(PORT, factory)
reactor.run()
| {
"content_hash": "b377da2a78404522dfac04545a8b3072",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 49,
"avg_line_length": 22.2972972972973,
"alnum_prop": 0.6812121212121212,
"repo_name": "trailofbits/greenhorn",
"id": "e76294e5dd03b63ab721bfed4bbb3d12f4709617",
"size": "825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csaw-2016/Sleeping_Guard/sleeping.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "633"
},
{
"name": "C++",
"bytes": "9868"
},
{
"name": "Python",
"bytes": "1309"
}
],
"symlink_target": ""
} |
from django.test import TestCase
class Test1(TestCase):
    """Smoke test: the /gmod/ view renders the gmod/gmod.html template."""
    def test_init(self):
        # Arrange
        # Act
        response = self.client.get(path='/gmod/')
        #Assert
        self.assertTemplateUsed(response, 'gmod/gmod.html') | {
"content_hash": "a6f81b909877de65394db7e916b173b3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 59,
"avg_line_length": 15.375,
"alnum_prop": 0.6016260162601627,
"repo_name": "aldenjenkins/foobargamingwebsite",
"id": "9802b1f96472044de4686d9811954286905e012e",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gmod/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "654042"
},
{
"name": "HTML",
"bytes": "460378"
},
{
"name": "JavaScript",
"bytes": "664289"
},
{
"name": "Python",
"bytes": "1450807"
},
{
"name": "SourcePawn",
"bytes": "6208"
}
],
"symlink_target": ""
} |
import platform
import unittest
import pytest
from conans.test.utils.tools import TestClient
conanfile_py = """
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
"""
conanfile = """[requires]
Hello/0.1@lasote/testing
"""
cmake = """set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup(TARGETS %s)
IF(APPLE AND CMAKE_SKIP_RPATH)
MESSAGE(FATAL_ERROR "RPath was skipped")
ENDIF()
"""
@pytest.mark.tool_cmake
class CMakeSkipRpathTest(unittest.TestCase):
    """Functional test of conan_basic_setup() RPATH flags (SKIP_RPATH/KEEP_RPATHS)."""
    def test_skip_flag(self):
        for way_to_skip in ("SKIP_RPATH", "KEEP_RPATHS"):
            client = TestClient()
            client.save({"conanfile.py": conanfile_py})
            client.run("export . lasote/testing")
            client.save({"conanfile.txt": conanfile,
                         "CMakeLists.txt": cmake % way_to_skip}, clean_first=True)
            client.run('install . -g cmake --build')
            generator = '-G "Visual Studio 15 Win64"' if platform.system() == "Windows" else ""
            client.run_command("cmake . %s" % generator)
            # With either flag, the RPATH adjustment must not be applied.
            self.assertNotIn("Conan: Adjusting default RPATHs Conan policies", client.out)
            self.assertIn("Build files have been written", client.out)
            if way_to_skip == "SKIP_RPATH":
                # Old spelling still works but warns about the rename.
                self.assertIn("Conan: SKIP_RPATH is deprecated, it has been renamed to KEEP_RPATHS",
                              client.out)
            # Same behaviour expected without the TARGETS argument.
            client.save({"conanfile.txt": conanfile,
                         "CMakeLists.txt": (cmake % way_to_skip).replace("TARGETS", "")},
                        clean_first=True)
            client.run('install . -g cmake --build')
            client.run_command("cmake . %s" % generator)
            self.assertNotIn("Conan: Adjusting default RPATHs Conan policies", client.out)
            self.assertIn("Build files have been written", client.out)
        # Without any skip flag the RPATH policy is applied; only assertable
        # on macOS where the template's APPLE check fires (FATAL_ERROR is
        # downgraded to INFO so configure still succeeds).
        client.save({"conanfile.txt": conanfile,
                     "CMakeLists.txt": (cmake % "").replace("FATAL_ERROR", "INFO")},
                    clean_first=True)
        if platform.system() == "Darwin":
            client.run('install . -g cmake --build')
            client.run_command("cmake .")
            self.assertIn("Conan: Adjusting default RPATHs Conan policies", client.out)
            self.assertIn("Build files have been written", client.out)
            self.assertIn("RPath was skipped", client.out)
| {
"content_hash": "f21b1b90add4157d3d4d7ddce0730946",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 100,
"avg_line_length": 36.013888888888886,
"alnum_prop": 0.5969919012726571,
"repo_name": "conan-io/conan",
"id": "f2bbf07e30080dcf6da515ad1c7631df3756974f",
"size": "2593",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/functional/generators/cmake_skip_rpath_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
} |
import scrapy
from scrapy.crawler import CrawlerProcess
from twisted.internet import selectreactor
# Install the select()-based reactor before Scrapy initializes Twisted, so it
# matches the TWISTED_REACTOR setting declared below.
selectreactor.install()
class NoRequestsSpider(scrapy.Spider):
    """Spider that yields no requests; the crawl finishes immediately."""
    name = 'no_request'
    def start_requests(self):
        return []
process = CrawlerProcess(settings={
    "TWISTED_REACTOR": "twisted.internet.selectreactor.SelectReactor",
})
process.crawl(NoRequestsSpider)
process.start()
| {
"content_hash": "7efcdeab3f5a767d918cb9677c734155",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 18.227272727272727,
"alnum_prop": 0.7556109725685786,
"repo_name": "elacuesta/scrapy",
"id": "e0d2dab2652e3fdad57f8ede20a6a482e68a65ae",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/CrawlerProcess/reactor_select_twisted_reactor_select.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3237"
},
{
"name": "Python",
"bytes": "2011549"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
from functools import wraps
from flask import current_app, g, request, redirect, url_for, session, _app_ctx_stack, _request_ctx_stack
from werkzeug.local import LocalProxy
from models import User, Project, Experiment
# Lazy proxies that resolve, at access time, to the project/experiment stored
# in the current request's session (None when nothing is set).
current_project = LocalProxy(lambda: _get_project())
current_experiment = LocalProxy(lambda: _get_experiment())
class Current(object):
    '''
    Keeps track of the project and experiment a user is currently
    navigating. A project loader must be registered: ::\n

        @current.project_loader
        def load_project(project_id):
            return Project.objects(name = project_id)[0]
    '''
    def __init__(self, app=None, add_context_processor=True):
        # Loader callbacks are installed later via the decorator-style
        # registration methods below.
        self.project_callback = None
        self.experiment_callback = None
        if app is None:
            return
        self.init_app(app, add_context_processor)
    def init_app(self, app, add_context_processor=True):
        # Expose this instance on the app and (optionally) register template
        # context processors for current_project / current_experiment.
        app.current = self
        if not add_context_processor:
            return
        app.context_processor(_project_context_processor)
        app.context_processor(_experiment_context_processor)
    def project_loader(self, loader):
        """Register the callable that resolves a project id to a project."""
        self.project_callback = loader
    def experiment_loader(self, loader):
        """Register the callable that resolves an experiment id."""
        self.experiment_callback = loader
def set_project(project = None, project_id = None):
    '''
    Sets the given project as the current project in the session.
    Either the project object or the project_id can be passed; calling
    with neither argument clears the current project.
    '''
    # 'is not None' instead of 'not ... == None': these are plain arguments
    # (not LocalProxy objects), so identity comparison is the correct idiom.
    if project is not None:
        session['project_id'] = project.id
    elif project_id is not None:
        # A bare id is only useful if a loader was registered to resolve it.
        if current_app.current.project_callback is None:
            raise Exception("Please register a project loader method using the Current.project_loader decorator")
        session['project_id'] = project_id
    else:
        # pop() makes clearing idempotent; the previous 'del' raised
        # KeyError when no project was set.
        session.pop('project_id', None)
def set_experiment(experiment = None, experiment_id = None):
    '''
    Sets the given experiment as the current experiment in the session.
    Either the experiment object or the experiment_id can be passed;
    calling with neither argument clears the current experiment.
    '''
    # 'is not None' instead of 'not ... == None': these are plain arguments
    # (not LocalProxy objects), so identity comparison is the correct idiom.
    if experiment is not None:
        session['experiment_id'] = experiment.id
    elif experiment_id is not None:
        # A bare id is only useful if a loader was registered to resolve it.
        if current_app.current.experiment_callback is None:
            raise Exception("Please register an experiment loader method using the Current.experiment_loader decorator")
        session["experiment_id"] = experiment_id
    else:
        # pop() makes clearing idempotent; the previous 'del' raised
        # KeyError when no experiment was set.
        session.pop("experiment_id", None)
def project_required(func):
    '''
    Decorator for a view to ensure that a project is active. If a project is
    not available, will kick the user to their dashboard.
    '''
    @wraps(func)
    def decorated_view(*args, **kwargs):
        # NOTE: '== None' (not 'is None') is deliberate here: current_project
        # is a werkzeug LocalProxy, so identity with None is always False
        # while equality is forwarded to the proxied object.
        if current_project == None:
            # Eventually generalize this!!
            return redirect(url_for('dashboard._dashboard'))
        return func(*args, **kwargs)
    return decorated_view
def experiment_required(func):
    '''
    Decorator for a view to ensure that an experiment is active. If an
    experiment is not available, will kick the user to their dashboard.
    '''
    @wraps(func)
    def decorated_view(*args, **kwargs):
        # NOTE: '== None' (not 'is None') is deliberate: current_experiment
        # is a werkzeug LocalProxy (see project_required for details).
        if current_experiment == None:
            # Eventually generalize this!!
            return redirect(url_for('dashboard._dashboard'))
        return func(*args, **kwargs)
    return decorated_view
def _project_context_processor():
    # Injects current_project into template contexts (registered in init_app).
    return dict(current_project=_get_project())
def _experiment_context_processor():
    # Injects current_experiment into template contexts.
    return dict(current_experiment=_get_experiment())
def _get_project():
    """Return the current project resolved via the registered loader.

    Reads 'project_id' from the Flask session; returns None when no
    project is active.
    """
    # 'in session' instead of 'in session.keys()': identical membership test
    # on the dict-like session without materializing the key view.
    if 'project_id' in session:
        return current_app.current.project_callback(session['project_id'])
    return None
def _get_experiment():
    """Return the current experiment resolved via the registered loader.

    Reads 'experiment_id' from the Flask session; returns None when no
    experiment is active.
    """
    # 'in session' instead of 'in session.keys()': identical membership test
    # on the dict-like session without materializing the key view.
    if 'experiment_id' in session:
        return current_app.current.experiment_callback(session['experiment_id'])
    return None
| {
"content_hash": "5a65b57e51ab2e0dde47619bccff4626",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 130,
"avg_line_length": 35.530434782608694,
"alnum_prop": 0.6627508565834557,
"repo_name": "lalitkumarj/NEXT-psych",
"id": "ea77e3fddfe81f5a4ab44a5579c5ccfda286097f",
"size": "4086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/base/current.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "197644"
},
{
"name": "HTML",
"bytes": "358541"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "2714780"
},
{
"name": "Makefile",
"bytes": "2880"
},
{
"name": "Perl",
"bytes": "5546"
},
{
"name": "Python",
"bytes": "782418"
},
{
"name": "Shell",
"bytes": "7340"
}
],
"symlink_target": ""
} |
import pickle
import pytest
from pydantic import BaseModel, Protocol, ValidationError
try:
import msgpack
except ImportError:
msgpack = None
class Model(BaseModel):
    # 'a' is required (ellipsis default), 'b' is optional with default 10.
    a: float = ...
    b: int = 10
def test_obj():
    """parse_obj builds a model from a plain dict."""
    m = Model.parse_obj(dict(a=10.2))
    assert str(m) == 'Model a=10.2 b=10'
def test_fails():
    """parse_obj rejects non-dict input with a descriptive validation error."""
    with pytest.raises(ValidationError) as exc_info:
        Model.parse_obj([1, 2, 3])
    assert """\
error validating input
Model expected dict not list (error_type=TypeError)""" == str(exc_info.value)
def test_json():
    """parse_raw treats the payload as JSON by default."""
    assert Model.parse_raw('{"a": 12, "b": 8}') == Model.construct(a=12, b=8)
def test_json_ct():
    """parse_raw accepts an explicit JSON content type."""
    assert Model.parse_raw('{"a": 12, "b": 8}', content_type='application/json') == Model.construct(a=12, b=8)
@pytest.mark.skipif(not msgpack, reason='msgpack not installed')
def test_msgpack_proto(mocker):
    """parse_raw decodes msgpack when the protocol is given explicitly."""
    # b'\x82\xa1a\x0c\xa1b\x08' == msgpack.packb(dict(a=12, b=8))
    assert Model.parse_raw(b'\x82\xa1a\x0c\xa1b\x08', proto=Protocol.msgpack) == Model.construct(a=12, b=8)
@pytest.mark.skipif(not msgpack, reason='msgpack not installed')
def test_msgpack_ct():
    """parse_raw decodes msgpack selected via content type."""
    assert Model.parse_raw(b'\x82\xa1a\x0c\xa1b\x08', content_type='application/msgpack') == Model.construct(a=12, b=8)
@pytest.mark.skipif(msgpack, reason='msgpack installed')
def test_msgpack_not_installed_proto(mocker):
    """Without msgpack installed, the msgpack protocol raises ImportError."""
    with pytest.raises(ImportError) as exc_info:
        Model.parse_raw(b'\x82\xa1a\x0c\xa1b\x08', proto=Protocol.msgpack)
    assert "ImportError: msgpack not installed, can't parse data" in str(exc_info)
@pytest.mark.skipif(msgpack, reason='msgpack installed')
def test_msgpack_not_installed_ct():
    """Without msgpack installed, the msgpack content type is rejected."""
    with pytest.raises(ValidationError) as exc_info:
        Model.parse_raw(b'\x82\xa1a\x0c\xa1b\x08', content_type='application/msgpack')
    assert """\
error validating input
Unknown content-type: application/msgpack (error_type=TypeError)""" == str(exc_info.value)
def test_pickle_ct():
    """Pickle payloads parse when allow_pickle is set (content-type route)."""
    data = pickle.dumps(dict(a=12, b=8))
    assert Model.parse_raw(data, content_type='application/pickle', allow_pickle=True) == Model.construct(a=12, b=8)
def test_pickle_proto():
    """Pickle payloads parse when allow_pickle is set (protocol route)."""
    data = pickle.dumps(dict(a=12, b=8))
    assert Model.parse_raw(data, proto=Protocol.pickle, allow_pickle=True) == Model.construct(a=12, b=8)
def test_pickle_not_allowed():
    """Pickle input is refused unless allow_pickle is passed explicitly."""
    data = pickle.dumps(dict(a=12, b=8))
    with pytest.raises(RuntimeError):
        Model.parse_raw(data, proto=Protocol.pickle)
def test_bad_ct():
    """An unknown content type surfaces as a validation error."""
    with pytest.raises(ValidationError) as exc_info:
        Model.parse_raw('{"a": 12, "b": 8}', content_type='application/missing')
    assert """\
error validating input
Unknown content-type: application/missing (error_type=TypeError)""" == str(exc_info.value)
def test_bad_proto():
    """An unknown protocol surfaces as a validation error."""
    with pytest.raises(ValidationError) as exc_info:
        Model.parse_raw('{"a": 12, "b": 8}', proto='foobar')
    assert """\
error validating input
Unknown protocol: foobar (error_type=TypeError)""" == str(exc_info.value)
def test_file_json(tmpdir):
    """parse_file infers JSON from a .json extension."""
    p = tmpdir.join('test.json')
    p.write('{"a": 12, "b": 8}')
    assert Model.parse_file(str(p)) == Model.construct(a=12, b=8)
def test_file_json_no_ext(tmpdir):
    """parse_file falls back to JSON when the file has no extension."""
    p = tmpdir.join('test')
    p.write('{"a": 12, "b": 8}')
    assert Model.parse_file(str(p)) == Model.construct(a=12, b=8)
@pytest.mark.skipif(not msgpack, reason='msgpack not installed')
def test_file_msgpack(tmpdir):
    """parse_file infers msgpack from a .mp extension."""
    p = tmpdir.join('test.mp')
    p.write_binary(b'\x82\xa1a\x0c\xa1b\x08')
    assert Model.parse_file(str(p)) == Model.construct(a=12, b=8)
def test_file_pickle(tmpdir):
    """parse_file infers pickle from a .pkl extension (allow_pickle set)."""
    p = tmpdir.join('test.pkl')
    p.write_binary(pickle.dumps(dict(a=12, b=8)))
    assert Model.parse_file(str(p), allow_pickle=True) == Model.construct(a=12, b=8)
def test_file_pickle_no_ext(tmpdir):
    """Without an extension, pickle must be named via content_type."""
    p = tmpdir.join('test')
    p.write_binary(pickle.dumps(dict(a=12, b=8)))
    assert Model.parse_file(str(p), content_type='application/pickle', allow_pickle=True) == Model.construct(a=12, b=8)
| {
"content_hash": "66adfbd7fd6625e15c4d52972b952fd9",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 119,
"avg_line_length": 31.626984126984127,
"alnum_prop": 0.6745294855708909,
"repo_name": "petroswork/pydantic",
"id": "3fc9153d8814194c2c2a0a082d82672f8163bbac",
"size": "3985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1670"
},
{
"name": "Python",
"bytes": "126664"
}
],
"symlink_target": ""
} |
import os
import flask_testing
import unittest
from app import app, db
from blapi.authorization.models import User
from blapi.authorization.tests.factories import UserFactory
class TestAuthorizationModels(flask_testing.TestCase):
    """CRUD tests for the User model against a throwaway SQLite database."""
    def create_app(self):
        # File-backed SQLite DB; the file is removed again in tearDown().
        app.config['SQLALCHEMY_DATABASE_URI'] = \
            "sqlite:///test_bucketlist_models.sqlite"
        app.config['TESTING'] = True
        return app
    def setUp(self):
        # Create the schema and insert one factory-generated user. The raw
        # factory data is stashed in app.config so the tests can compare
        # against it later.
        db.create_all()
        factory_user = UserFactory()
        user_details = User(
            full_name=factory_user.full_name,
            email=factory_user.email,
            password=factory_user.password,
            active=True,
        )
        app.config['db_user_details'] = factory_user
        db.session.add(user_details)
        db.session.commit()
    def tearDown(self):
        # Drop everything and delete the SQLite file between tests.
        db.session.remove()
        db.drop_all()
        os.remove("test_bucketlist_models.sqlite")
    def test_create_user(self):
        """The user inserted in setUp() is retrievable by email."""
        registered_user = app.config['db_user_details']
        self.assertEqual(
            registered_user.email,
            db.session.query(User).filter_by(
                email=registered_user.email).one().email,
            msg="Cannot create user"
        )
    def test_update_user(self):
        """Updating full_name persists across a commit."""
        current_user = User.query.filter_by(
            full_name=app.config['db_user_details'].full_name).one()
        current_user.full_name = "Updated Name"
        db.session.add(current_user)
        db.session.commit()
        self.assertEqual(
            "Updated Name",
            User.query.filter_by(
                email=app.config['db_user_details'].email).one().full_name
        )
    def test_delete_user(self):
        """Deleting the user removes it from the database."""
        current_user = User.query.filter_by(
            full_name=app.config['db_user_details'].full_name).one()
        db.session.delete(current_user)
        db.session.commit()
        self.assertEqual(
            len(User.query.filter_by(
                email=app.config['db_user_details'].email).all()),
            0
        )
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "e379fc4b8ff9442b7c96787e99dd8299",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 74,
"avg_line_length": 28.698630136986303,
"alnum_prop": 0.5866348448687351,
"repo_name": "andela-jkamau/blapi",
"id": "9536c310e2b6773cde67980bca7bf169e759e4fc",
"size": "2095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blapi/authorization/tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33277"
}
],
"symlink_target": ""
} |
import analyticsclient.constants.data_format as DF
class Module(object):
    """ Module related analytics data. """

    def __init__(self, client, course_id, module_id):
        """
        Initialize the Module client.

        Arguments:
            client (analyticsclient.client.Client): The client to use to access the API.
            course_id (str): String identifying the course
            module_id (str): String identifying the module
        """
        self.client = client
        # unicode() implies this module targets Python 2; both ids are
        # normalized to text before being interpolated into API paths.
        self.course_id = unicode(course_id)
        self.module_id = unicode(module_id)

    def _get_module_resource(self, path_template, data_format):
        """Fetch *path_template* (with {0} = module id) via the API client.

        All four public distribution/timeline accessors repeated this
        format-and-get pattern; it is factored out here.
        """
        path = path_template.format(self.module_id)
        return self.client.get(path, data_format=data_format)

    def answer_distribution(self, data_format=DF.JSON):
        """
        Get answer distribution data for a module.

        Arguments:
            data_format (str): Format in which to return data (default is JSON)
        """
        return self._get_module_resource('problems/{0}/answer_distribution/', data_format)

    def grade_distribution(self, data_format=DF.JSON):
        """
        Get grade distribution data for a module.

        Arguments:
            data_format (str): Format in which to return data (default is JSON)
        """
        return self._get_module_resource('problems/{0}/grade_distribution/', data_format)

    def sequential_open_distribution(self, data_format=DF.JSON):
        """
        Get open distribution data for a module.

        Arguments:
            data_format (str): Format in which to return data (default is JSON)
        """
        return self._get_module_resource('problems/{0}/sequential_open_distribution/', data_format)

    def video_timeline(self, data_format=DF.JSON):
        """
        Get video segments/timeline for a module.

        Arguments:
            data_format (str): Format in which to return data (default is JSON)
        """
        return self._get_module_resource('videos/{0}/timeline/', data_format)
| {
"content_hash": "e214a2f2dcb3bf982e3527da7caefc25",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 88,
"avg_line_length": 33.04838709677419,
"alnum_prop": 0.6168862859931674,
"repo_name": "open-craft/edx-analytics-data-api-client",
"id": "2df0c2e2fc46ae4b2b88cb3c817516e23d0e6fac",
"size": "2049",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "analyticsclient/module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "604"
},
{
"name": "Python",
"bytes": "34095"
},
{
"name": "Shell",
"bytes": "784"
}
],
"symlink_target": ""
} |
from swmmtoolbox import *
| {
"content_hash": "51005efd4fc980e66c4aaefb6c480149",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 25,
"avg_line_length": 26,
"alnum_prop": 0.8076923076923077,
"repo_name": "lucashtnguyen/swmmtoolbox",
"id": "4cfe9b00009fca044638b86e7daefc39319bde8a",
"size": "26",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5104"
},
{
"name": "Makefile",
"bytes": "5580"
},
{
"name": "Python",
"bytes": "30952"
}
],
"symlink_target": ""
} |
# Disjunctive ASP program exercising choice points and backtracking.
# NOTE: 'input'/'output' shadow Python builtins; kept — the test harness
# expects these exact names. 'output' is identical to 'input' (round-trip).
input = """
% a and b are the top-level PTs.
a | b.
% After taking a, c and d are the PTs and each of them leads to a model.
c | d :- a.
% After backtracking, not a is propagated and, among others, b is derived.
% So g and h are additional PTs in the top level now.
g | h :- b, not a.
% g directly leads to an inconsistency, and so does h.
% Also not g and not h lead to an inconsistency!
% So, we should immediately backtrack.
:- f.
f :- g, not h.
f :- h, not g.
% i and j should never be derived, since then also one of g or h must be true.
i | j :- b, not a.
"""
output = """
% a and b are the top-level PTs.
a | b.
% After taking a, c and d are the PTs and each of them leads to a model.
c | d :- a.
% After backtracking, not a is propagated and, among others, b is derived.
% So g and h are additional PTs in the top level now.
g | h :- b, not a.
% g directly leads to an inconsistency, and so does h.
% Also not g and not h lead to an inconsistency!
% So, we should immediately backtrack.
:- f.
f :- g, not h.
f :- h, not g.
% i and j should never be derived, since then also one of g or h must be true.
i | j :- b, not a.
"""
| {
"content_hash": "a179a2d385b3c6fd8667c7e5cbcca365",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 26.931818181818183,
"alnum_prop": 0.6270042194092827,
"repo_name": "veltri/DLV2",
"id": "9793a24383544b4e8085ef892d28ecfa0bf1393a",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parser/choice.21.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "34165"
},
{
"name": "C++",
"bytes": "446857"
},
{
"name": "Perl",
"bytes": "3509"
},
{
"name": "Python",
"bytes": "1067593"
},
{
"name": "Shell",
"bytes": "1299"
}
],
"symlink_target": ""
} |
import os, io
import numpy as np # noqa (API import)
import param
__version__ = str(param.version.Version(fpath=__file__, archive_commit="$Format:%h$",
reponame="holoviews"))
from . import util # noqa (API import)
from .annotators import annotate # noqa (API import)
from .core import archive, config # noqa (API import)
from .core.boundingregion import BoundingBox # noqa (API import)
from .core.dimension import OrderedDict, Dimension # noqa (API import)
from .core.element import Element, Collator # noqa (API import)
from .core.layout import (Layout, NdLayout, Empty, # noqa (API import)
AdjointLayout)
from .core.ndmapping import NdMapping # noqa (API import)
from .core.options import (Options, Store, Cycle, # noqa (API import)
Palette, StoreOptions)
from .core.overlay import Overlay, NdOverlay # noqa (API import)
from .core.spaces import (HoloMap, Callable, DynamicMap, # noqa (API import)
GridSpace, GridMatrix)
from .operation import Operation # noqa (API import)
from .element import * # noqa (API import)
from .element import __all__ as elements_list
from .selection import link_selections # noqa (API import)
from .util import (extension, renderer, output, opts, # noqa (API import)
render, save)
from .util.transform import dim # noqa (API import)
# Suppress warnings generated by NumPy in matplotlib
# Expected to be fixed in next matplotlib release
import warnings
warnings.filterwarnings("ignore",
                        message="elementwise comparison failed; returning scalar instead")
# Prefer the notebook-aware extension when IPython is importable; otherwise
# install a stub that fails loudly if someone calls it.
try:
    import IPython  # noqa (API import)
    from .ipython import notebook_extension
    extension = notebook_extension  # noqa (name remapping)
except ImportError:
    class notebook_extension(param.ParameterizedFunction):
        # Stub raised when IPython is unavailable; points users at hv.extension.
        def __call__(self, *args, **opts):  # noqa (dummy signature)
            raise Exception("IPython notebook not available: use hv.extension instead.")
# A single holoviews.rc file may be executed if found. Candidate locations
# are checked in priority order ($HOLOVIEWSRC first); only the first file
# that exists is executed, then the search stops.
for rcfile in [os.environ.get("HOLOVIEWSRC", ''),
               os.path.abspath(os.path.join(os.path.split(__file__)[0],
                                            '..', 'holoviews.rc')),
               "~/.holoviews.rc",
               "~/.config/holoviews/holoviews.rc"]:
    filename = os.path.expanduser(rcfile)
    if os.path.isfile(filename):
        with io.open(filename, encoding='utf8') as f:
            code = compile(f.read(), filename, 'exec')
            try:
                exec(code)
            except Exception as e:
                # A broken rc file must not prevent the package from importing.
                print("Warning: Could not load %r [%r]" % (filename, str(e)))
        # Remove loader temporaries from the module namespace.
        del f, code
        break
del filename
def help(obj, visualization=True, ansi=True, backend=None,
         recursive=False, pattern=None):
    """Extended ``help`` for HoloViews objects and parameterized functions.

    Documentation is looked up via ``Store.info`` for the active (or given)
    backend. ``pattern`` is a regular expression used to filter the output,
    ``recursive`` includes documentation for sub-objects (works on instances,
    not classes), and ``ansi=False`` strips ANSI color codes. Falls back to
    the built-in ``pydoc.help`` when no HoloViews-specific info is available.
    """
    selected_backend = backend or Store.current_backend
    doc = Store.info(
        obj,
        ansi=ansi,
        backend=selected_backend,
        visualization=visualization,
        recursive=recursive,
        pattern=pattern,
        elements=elements_list)
    msg = ("\nTo view the visualization options applicable to this "
           "object or class, use:\n\n"
           "   holoviews.help(obj, visualization=True)\n\n")
    if not doc:
        # Nothing HoloViews-specific: defer to the standard help system.
        import pydoc
        pydoc.help(obj)
    else:
        prefix = msg if visualization is False else ''
        print(prefix + doc)
# Clean up the module namespace: these are implementation details, not part
# of the public holoviews API.
del io, np, os, rcfile, warnings
| {
"content_hash": "68fd5f160c3aed475f4b9cee1a877497",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 90,
"avg_line_length": 45.417582417582416,
"alnum_prop": 0.606823130897653,
"repo_name": "ioam/holoviews",
"id": "46c225ab4b40d2bc98b769a123f315eec76dbbfa",
"size": "4133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holoviews/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1546"
},
{
"name": "HTML",
"bytes": "18997"
},
{
"name": "JavaScript",
"bytes": "20747"
},
{
"name": "Jupyter Notebook",
"bytes": "1379"
},
{
"name": "Python",
"bytes": "3241652"
}
],
"symlink_target": ""
} |
from fastapi import FastAPI
from httpx import AsyncClient
import pytest
class TestUsersRoutes:
    """Smoke tests for the grid API meta routes."""

    @pytest.mark.asyncio
    async def test_ping_status(self, app: FastAPI, client: AsyncClient) -> None:
        """The ping endpoint answers 200 with the literal JSON string "pong"."""
        ping_url = app.url_path_for("ping")
        response = await client.get(ping_url)
        assert response is not None
        assert response.status_code == 200
        assert response.json() == "pong"
| {
"content_hash": "55d1a969e5d78ffbd41c8e20c7034f8d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 80,
"avg_line_length": 29.214285714285715,
"alnum_prop": 0.6601466992665037,
"repo_name": "OpenMined/PySyft",
"id": "53a55326076b1bab7b7171003e37b2edf01d082b",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "packages/grid/backend/grid/tests/api/meta/meta_routes_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2084"
},
{
"name": "Cap'n Proto",
"bytes": "1377"
},
{
"name": "Dockerfile",
"bytes": "9740"
},
{
"name": "HCL",
"bytes": "4438"
},
{
"name": "JavaScript",
"bytes": "85898"
},
{
"name": "Jupyter Notebook",
"bytes": "33167760"
},
{
"name": "Makefile",
"bytes": "7605"
},
{
"name": "Mako",
"bytes": "510"
},
{
"name": "PowerShell",
"bytes": "161"
},
{
"name": "Python",
"bytes": "3710174"
},
{
"name": "Shell",
"bytes": "52371"
},
{
"name": "TypeScript",
"bytes": "346493"
}
],
"symlink_target": ""
} |
import yaml
import pandas as pd
from airflow.models import Variable
import logging, os, requests, subprocess, re, shutil, gzip
from igf_airflow.logging.upload_log_msg import send_log_to_channels
from igf_data.utils.fileutils import check_file_path, get_temp_dir, copy_local_file, get_datestamp_label
from igf_data.utils.dbutils import read_dbconf_json
from igf_data.igfdb.fileadaptor import FileAdaptor
from igf_data.igfdb.analysisadaptor import AnalysisAdaptor
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
# Airflow Variables are resolved once at DAG-parse time. Every lookup uses a
# None (or {}) default so a missing Variable does not break DAG import.
DATABASE_CONFIG_FILE = \
    Variable.get('database_config_file', default_var=None)
SLACK_CONF = \
    Variable.get('slack_conf', default_var=None)
MS_TEAMS_CONF = \
    Variable.get('ms_teams_conf', default_var=None)
ANALYSIS_LOOKUP_DIR = \
    Variable.get("analysis_lookup_dir", default_var=None)
# NOTE(review): the key 'analysis_triger_file' looks like a typo of
# 'analysis_trigger_file'. It must match the Variable name configured in
# Airflow, so confirm there before renaming either side.
ANALYSIS_TRIGGER_FILE = \
    Variable.get("analysis_triger_file", default_var=None)
ANALYSIS_LIST = \
    Variable.get("analysis_dag_list", default_var={})
def send_log_and_reset_trigger_file_func(**context):
    """Airflow callable: report triggered analyses and reset the trigger file.

    Pulls the list of triggered analyses from XCom (task and key supplied via
    the task ``params``), posts a per-analysis-type count to Slack/MS Teams,
    and truncates ANALYSIS_TRIGGER_FILE by overwriting it with an empty CSV so
    already-triggered entries are not picked up again. On failure a message is
    sent to the same channels and the exception is re-raised so the task fails.
    """
    try:
        ti = context.get('ti')
        # XCom coordinates come from the task's params dict.
        xcom_key = \
            context['params'].get('xcom_key')
        xcom_task = \
            context['params'].get('xcom_task')
        analysis_list = \
            ti.xcom_pull(
                task_ids=xcom_task,
                key=xcom_key)
        df = pd.DataFrame(analysis_list)
        if len(df.index)>0:
            # Count triggered analyses per analysis_type for the message.
            analysis_counts = \
                df.\
                groupby("analysis_type").\
                size().\
                to_dict()
            # NOTE(review): 'Triggred' is a typo ('Triggered') in the
            # outgoing notification text.
            message = \
                "Triggred following analysis: {0}".format(analysis_counts)
            send_log_to_channels(
                slack_conf=SLACK_CONF,
                ms_teams_conf=MS_TEAMS_CONF,
                task_id=context['task'].task_id,
                dag_id=context['task'].dag_id,
                comment=message,
                reaction='pass')
            # reset analysis_trigger_file: overwrite with an empty frame
            pd.DataFrame([]).\
                to_csv(
                    ANALYSIS_TRIGGER_FILE,
                    header=False,
                    index=False)
    except Exception as e:
        logging.error(e)
        message = \
            'analysis input finding error: {0}'.\
            format(e)
        send_log_to_channels(
            slack_conf=SLACK_CONF,
            ms_teams_conf=MS_TEAMS_CONF,
            task_id=context['task'].task_id,
            dag_id=context['task'].dag_id,
            comment=message,
            reaction='fail')
        raise
def trigger_dag_func(context, dag_run_obj):
    """Trigger callback: launch the analysis DAG for this trigger task.

    Pulls the resolved analysis records from XCom, selects the record matching
    this task's analysis_name and index (via get_dag_conf_for_analysis), and
    triggers the DAG whose id equals analysis_name, passing the record as the
    run conf.

    Args:
        context: Airflow task context; ``params`` must carry xcom_key,
            xcom_task, analysis_name and index.
        dag_run_obj: legacy Airflow 1.x payload object; unused after the
            v2 fix below.
    """
    try:
        ti = context.get('ti')
        xcom_key = \
            context['params'].get('xcom_key')
        xcom_task = \
            context['params'].get('xcom_task')
        analysis_name = \
            context['params'].get('analysis_name')
        index = \
            context['params'].get('index')
        analysis_list = \
            ti.xcom_pull(
                task_ids=xcom_task,
                key=xcom_key)
        analysis_detail = \
            get_dag_conf_for_analysis(
                analysis_list=analysis_list,
                analysis_type=analysis_name,
                index=index)
        #dag_run_obj.payload = analysis_detail
        #return dag_run_obj
        ## FIX for v2: build and execute a TriggerDagRunOperator inline
        ## instead of returning a payload object (Airflow 1.x style above).
        trigger_dag = \
            TriggerDagRunOperator(
                task_id="trigger_dag_{0}_{1}".format(analysis_name, index),
                trigger_dag_id=analysis_name,
                conf=analysis_detail)
        return trigger_dag.execute(context=context)
    except Exception as e:
        logging.error(e)
        message = \
            'analysis input finding error: {0}'.\
            format(e)
        send_log_to_channels(
            slack_conf=SLACK_CONF,
            ms_teams_conf=MS_TEAMS_CONF,
            task_id=context['task'].task_id,
            dag_id=context['task'].dag_id,
            comment=message,
            reaction='fail')
        raise
def get_dag_conf_for_analysis(analysis_list, analysis_type, index):
    """Select the index-th record of a given analysis type.

    Records matching ``analysis_type`` are ordered by their analysis_id and
    the record at position ``index`` is returned as a dict.

    Raises:
        ValueError: if the index is out of range, the analysis_id key is
            missing, or anything else goes wrong.
    """
    try:
        records = pd.DataFrame(analysis_list)
        matching = records[records['analysis_type'] == analysis_type]
        if index >= len(matching.index):
            raise KeyError(
                "Missing key {0} for analysis {1}".format(index, analysis_type))
        if 'analysis_id' not in matching.columns:
            raise KeyError("Missing key analysis_id in the analysis_list")
        ordered = \
            matching.\
            sort_values('analysis_id').\
            to_dict(orient="records")
        return ordered[index]
    except Exception as e:
        raise ValueError(
            "Failed to fetch analysis details for trigger, error: {0}".format(e))
def get_analysis_ids_from_csv(analysis_trigger_file):
    """Read (project_igf_id, analysis_name) pairs from a headerless CSV.

    An empty trigger file yields an empty list; otherwise each CSV row becomes
    a dict with keys 'project_igf_id' and 'analysis_name'.
    """
    try:
        check_file_path(analysis_trigger_file)
        column_names = [
            "project_igf_id",
            "analysis_name"]
        if os.path.getsize(analysis_trigger_file) == 0:
            entries = pd.DataFrame(columns=column_names)
        else:
            entries = pd.read_csv(
                analysis_trigger_file,
                header=None,
                sep=",")
            entries.columns = column_names
        return entries.to_dict(orient="records")
    except Exception as e:
        raise ValueError(
            "Failed to get analysis id from {0}, error {1}".\
            format(analysis_trigger_file, e))
def fetch_analysis_records(analysis_records_list, db_config_file):
    """Resolve analysis ids/types for (project, analysis-name) pairs.

    Args:
        analysis_records_list: list of dicts, each expected to carry
            'project_igf_id' and 'analysis_name'.
        db_config_file: path of the json db config file.

    Returns:
        A two-tuple (updated_analysis_records, errors_list). The first element
        lists one dict per resolved pair with analysis_name, analysis_id,
        analysis_type and analysis_description; the second lists messages for
        malformed or unresolvable entries.

    Raises:
        ValueError: wrapping any underlying failure.
    """
    try:
        updated_analysis_records = list()
        errors_list = list()
        db_params = \
            read_dbconf_json(db_config_file)
        aa = AnalysisAdaptor(**db_params)
        aa.start_session()
        # BUGFIX: close the db session even when a lookup raises; previously
        # close_session was only reached on the success path, leaking the
        # session on error.
        try:
            for analysis_entry in analysis_records_list:
                project_igf_id = analysis_entry.get("project_igf_id")
                analysis_name = analysis_entry.get("analysis_name")
                if project_igf_id is None or \
                   analysis_name is None:
                    errors_list.append(
                        "No project_igf_id or analysis_name found in list: {0}".\
                        format(analysis_entry) )
                else:
                    analysis_records = \
                        aa.fetch_analysis_record_by_analysis_name_and_project_igf_id(
                            analysis_name=analysis_name,
                            project_igf_id=project_igf_id,
                            output_mode='dataframe')
                    if len(analysis_records.index) > 0:
                        updated_analysis_records.append({
                            "analysis_name": analysis_name,
                            "analysis_id": analysis_records["analysis_id"].values[0],
                            "analysis_type": analysis_records["analysis_type"].values[0],
                            "analysis_description": analysis_records["analysis_description"].values[0] })
                    else:
                        errors_list.\
                            append(
                                "No analysis entry found with name: {0} and project: {1}".\
                                format(analysis_name, project_igf_id))
        finally:
            aa.close_session()
        return updated_analysis_records, errors_list
    except Exception as e:
        raise ValueError(
            "Failed to fetch analysis records, error: {0}".\
            format(e))
def find_analysis_to_trigger_dags_func(**context):
    """Airflow branch callable: decide which analysis-trigger tasks to run.

    Reads (project_igf_id, analysis_name) pairs from ANALYSIS_TRIGGER_FILE,
    resolves them against the database and returns the list of task ids to
    follow: one '<trigger_task_prefix>_<analysis_type>_<i>' per resolved
    analysis, or just [no_trigger_task] when nothing was found. The resolved
    records are pushed to XCom under ``xcom_key`` for the trigger tasks.
    Resolution errors are reported to Slack/MS Teams but do not stop the run.

    Raises:
        KeyError: if a resolved analysis_type is not registered in the
            ANALYSIS_LIST variable, or the records lack an analysis_type.
        ValueError: if more analyses of one type were found than
            ``analysis_limit`` allows.
    """
    try:
        analysis_limit = \
            context['params'].get('analysis_limit')
        no_trigger_task = \
            context['params'].get('no_trigger_task')
        trigger_task_prefix = \
            context['params'].get('trigger_task_prefix')
        xcom_key = \
            context['params'].get('xcom_key')
        ti = context.get('ti')
        task_list = [no_trigger_task]
        analysis_list = \
            get_analysis_ids_from_csv(
                ANALYSIS_TRIGGER_FILE)
        updated_analysis_records, errors_list = \
            fetch_analysis_records(
                analysis_list,
                DATABASE_CONFIG_FILE)
        if len(errors_list) > 0:
            # Lookup failures are reported but deliberately not fatal.
            # FIX: corrected message typo 'Foung' -> 'Found'.
            message = \
                "Found {0} errors while seeding analysis. Ignoring errors: {1}".\
                format(len(errors_list), " \n".join(errors_list))
            send_log_to_channels(
                slack_conf=SLACK_CONF,
                ms_teams_conf=MS_TEAMS_CONF,
                task_id=context['task'].task_id,
                dag_id=context['task'].dag_id,
                comment=message,
                reaction='fail')
        if len(updated_analysis_records) > 0:
            task_df = \
                pd.DataFrame(
                    updated_analysis_records)
            if "analysis_type" not in task_df.columns:
                # FIX: corrected message typo 'coulmn' -> 'column'.
                raise KeyError("analysis_type column not found in updated analysis records")
            for analysis_type, t_data in task_df.groupby("analysis_type"):
                if analysis_type not in ANALYSIS_LIST.keys():
                    raise KeyError(
                        "Missing analysis type {0} in the variable list: {1}".\
                        format(analysis_type, ANALYSIS_LIST.keys()))
                analysis_count = len(t_data.index)
                if analysis_count >= analysis_limit:
                    raise ValueError(
                        "Need to increase analysis_limit from {0} to {1}".\
                        format(analysis_limit, analysis_count))
                for i in range(0, analysis_count):
                    task_list.\
                        append(
                            "{0}_{1}_{2}".\
                            format(
                                trigger_task_prefix,
                                analysis_type,
                                i))
            ti.xcom_push(
                key=xcom_key,
                value=updated_analysis_records) # xcom push only if analysis is present
        return task_list
    except Exception as e:
        logging.error(e)
        message = \
            'analysis input finding error: {0}'.\
            format(e)
        send_log_to_channels(
            slack_conf=SLACK_CONF,
            ms_teams_conf=MS_TEAMS_CONF,
            task_id=context['task'].task_id,
            dag_id=context['task'].dag_id,
            comment=message,
            reaction='fail')
        raise
def parse_analysis_yaml_and_load_to_db(analysis_yaml_file, db_config_file):
    """Load analysis design entries from a yaml file into the analysis table.

    The yaml file must decode to records containing project_igf_id,
    analysis_name, analysis_type and analysis_description. On success the
    analysis rows and a file record for the yaml file itself are committed in
    a single transaction; on any db error the transaction is rolled back.

    Raises:
        ValueError: wrapping any parse, validation, or db failure.
    """
    try:
        check_file_path(analysis_yaml_file)
        with open(analysis_yaml_file, 'r') as fp:
            # safe_load: never execute arbitrary yaml tags.
            yaml_data = yaml.safe_load(fp)
        df = pd.DataFrame(yaml_data)
        if "project_igf_id" not in df.columns or \
           "analysis_name" not in df.columns or \
           "analysis_type" not in df.columns or \
           "analysis_description" not in df.columns:
            raise ValueError(
                "Missing required column in {0}".\
                format(df.columns))
        if len(df.index) > 0:
            db_params = \
                read_dbconf_json(db_config_file)
            aa = AnalysisAdaptor(**db_params)
            aa.start_session()
            try:
                # Store analyses and the source-file record together, then
                # commit both as one unit (autosave=False defers the commit).
                aa.store_analysis_data(df, autosave=False)
                fa = FileAdaptor(**{'session': aa.session})
                fa.store_file_data(
                    data=[{'file_path': analysis_yaml_file}],
                    autosave=False)
                aa.commit_session()
                aa.close_session()
            except:
                # Intentional bare except: roll back, close, and re-raise
                # whatever failed so the outer handler can wrap it.
                aa.rollback_session()
                aa.close_session()
                raise
    except Exception as e:
        raise ValueError(
            "Failed to load analysis {0}, error: {1}".\
            format(analysis_yaml_file, e))
def load_analysis_design_func(**context):
    """Airflow callable: load one discovered yaml design file into the db.

    The list of new design files is pulled from XCom (task and key supplied
    via ``params``); ``task_index`` selects which file this task instance is
    responsible for. Failures are reported to Slack/MS Teams and re-raised.
    """
    try:
        task_index = \
            context['params'].get('task_index')
        load_design_xcom_key = \
            context['params'].get('load_design_xcom_key')
        load_design_xcom_task = \
            context['params'].get('load_design_xcom_task')
        ti = context.get('ti')
        analysis_files = \
            ti.xcom_pull(
                task_ids=load_design_xcom_task,
                key=load_design_xcom_key)
        analysis_file = \
            analysis_files[task_index]
        if analysis_file is None:
            raise ValueError("No analysis file list found")
        check_file_path(analysis_file)
        parse_analysis_yaml_and_load_to_db(
            analysis_file,
            DATABASE_CONFIG_FILE)
    except Exception as e:
        logging.error(e)
        message = \
            'analysis input loading error: {0}'.\
            format(e)
        send_log_to_channels(
            slack_conf=SLACK_CONF,
            ms_teams_conf=MS_TEAMS_CONF,
            task_id=context['task'].task_id,
            dag_id=context['task'].dag_id,
            comment=message,
            reaction='fail')
        raise
def find_all_analysis_yaml_files(analysis_design_path):
    """Recursively collect yaml design files under a directory.

    Args:
        analysis_design_path: directory to walk.

    Returns:
        List of joined paths for every file ending in .yaml or .yml, in
        os.walk order.

    Raises:
        ValueError: wrapping any underlying failure (e.g. missing path).
    """
    try:
        check_file_path(analysis_design_path)
        # str.endswith accepts a tuple of suffixes, so a single check
        # covers both extensions.
        return [
            os.path.join(root, filename)
            for root, _, files in os.walk(analysis_design_path)
            for filename in files
            if filename.endswith((".yaml", ".yml"))]
    except Exception as e:
        raise ValueError(
            "Failed to list analysis files in {0}, error: {1}".\
            format(analysis_design_path, e))
def get_new_file_list(all_files, db_config_file):
    """Filter a file list down to paths not yet registered in the file table.

    Args:
        all_files: list of file paths to check; non-list or empty input
            yields an empty result.
        db_config_file: path of the json db config file.

    Returns:
        List of paths without an existing file record.

    Raises:
        ValueError: wrapping any underlying failure.
    """
    try:
        filtered_list = list()
        if isinstance(all_files, list) and \
           len(all_files) > 0:
            db_params = \
                read_dbconf_json(db_config_file)
            fa = FileAdaptor(**db_params)
            fa.start_session()
            # BUGFIX: the session was never closed before; close it even when
            # a lookup raises (matches fetch_analysis_records' handling).
            try:
                for f in all_files:
                    file_exists = \
                        fa.check_file_records_file_path(f)
                    if not file_exists:
                        filtered_list.append(f)
            finally:
                fa.close_session()
        return filtered_list
    except Exception as e:
        raise ValueError(
            "Failed to check db for existing file, error: {0}".format(e))
def find_analysis_designs_func(**context):
    """Airflow branch callable: find new yaml design files to load.

    Walks ANALYSIS_LOOKUP_DIR for yaml files, filters out files already
    registered in the db, and returns one '<load_analysis_task_prefix>_<i>'
    task id per new file (capped at load_task_limit) after pushing the file
    list to XCom. Returns [no_task_name] when nothing new is found.
    """
    try:
        load_analysis_task_prefix = \
            context['params'].get('load_analysis_task_prefix')
        load_task_limit = \
            context['params'].get('load_task_limit')
        load_design_xcom_key = \
            context['params'].get('load_design_xcom_key')
        no_task_name = \
            context['params'].get('no_task_name')
        ti = context.get('ti')
        all_files = \
            find_all_analysis_yaml_files(
                ANALYSIS_LOOKUP_DIR)
        new_files = \
            get_new_file_list(
                all_files,
                DATABASE_CONFIG_FILE)
        if len(new_files) > load_task_limit:
            # Cap the batch at load_task_limit files; any remainder is
            # picked up by a later DAG run.
            new_files = new_files[0: load_task_limit]
        if len(new_files) > 0:
            task_list = [
                "{0}_{1}".format(load_analysis_task_prefix, i)
                for i in range(0, len(new_files))]
            # xcom push only if analysis is present
            ti.xcom_push(
                key=load_design_xcom_key,
                value=new_files)
        else:
            task_list = [no_task_name]
        return task_list
    except Exception as e:
        logging.error(e)
        message = \
            'analysis input finding error: {0}'.\
            format(e)
        send_log_to_channels(
            slack_conf=SLACK_CONF,
            ms_teams_conf=MS_TEAMS_CONF,
            task_id=context['task'].task_id,
            dag_id=context['task'].dag_id,
            comment=message,
            reaction='fail')
        raise
"content_hash": "80b461dad07e5ce8aaef5e0bfa6943b8",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 146,
"avg_line_length": 32.840749414519905,
"alnum_prop": 0.5875347643157669,
"repo_name": "imperial-genomics-facility/data-management-python",
"id": "557092e4a3e8e74e23691339089ce81e7f3ca675",
"size": "14023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "igf_airflow/utils/dag18_upload_and_trigger_analysis_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2900"
},
{
"name": "HTML",
"bytes": "77727"
},
{
"name": "JavaScript",
"bytes": "1074"
},
{
"name": "Jinja",
"bytes": "399"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "2553178"
},
{
"name": "R",
"bytes": "376"
},
{
"name": "Shell",
"bytes": "536"
}
],
"symlink_target": ""
} |
"""Django settings for test app."""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'secret_key_for_test'
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'opencensus.ext.django.middleware.OpencensusMiddleware',
)
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'app', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
OPENCENSUS = {
'TRACE': {
'SAMPLER': 'opencensus.trace.samplers.ProbabilitySampler(rate=0.5)',
'EXPORTER': '''opencensus.ext.ocagent.trace_exporter.TraceExporter(
service_name='foobar',
)''',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = 'static'
STATIC_URL = '/static/'
| {
"content_hash": "19394a7b59264a04ce49c752cc570a55",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 76,
"avg_line_length": 26.858974358974358,
"alnum_prop": 0.6544152744630072,
"repo_name": "census-instrumentation/opencensus-python",
"id": "3a391eb8ad5db7650c53f6de6a665693fe7376cc",
"size": "2678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/opencensus-ext-django/examples/app/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1856"
},
{
"name": "Makefile",
"bytes": "615"
},
{
"name": "Python",
"bytes": "1673591"
},
{
"name": "Shell",
"bytes": "4011"
}
],
"symlink_target": ""
} |
'''
Created on 2014-12-17
@author: Shawn
'''
import json
import unittest
import parser
# Shared JSON encoder/decoder instances, created once at module level so the
# test cases can reuse them.
Encoder = json.JSONEncoder()
Decoder = json.JSONDecoder()
def suite():
    """Collect every TestParser test case into a single test suite."""
    parser_suite = unittest.makeSuite(TestParser, "test")
    return unittest.TestSuite([parser_suite])
class TestParser(unittest.TestCase):
    '''
    Tests for the excel parser (general/character data).
    '''
    def setUp(self):
        # Directory containing the excel fixture files used by every test.
        self.excelFilePath = 'excelfile'

    def test_getAllFilenameS(self):
        """
        Get the filenames of all files under the configured directory.
        :return:
        """
        theParser = parser.Parser(self.excelFilePath)
        print theParser.excelFilenameS

    def test_load(self):
        """
        Load the excel files.
        :return:
        """
        theParser = parser.Parser(self.excelFilePath)
        theParser.load()

    def test_setInfoArray(self):
        """
        Parse worksheets into the infoArray data format.
        :return:
        """
        theParser = parser.Parser(self.excelFilePath)
        theParser.load()
        for ws in theParser.getAllWorksheet():
            print ws.infoArray
| {
"content_hash": "1a37cab417de012aa5ea58fbc7acb71d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 55,
"avg_line_length": 17.4,
"alnum_prop": 0.5919540229885057,
"repo_name": "lamter/parsexcel",
"id": "aacfb437a5a9db383bcfc6812ab47a0cd5f4b66b",
"size": "1120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unittest_parserexcel/test_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4110"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make SQLScript.createdby an explicit CASCADE foreign key.

    Deleting a user now also deletes the scripts they created.
    """

    dependencies = [
        ('websqlrunner', '0002_remove_run_parallel'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sqlscript',
            name='createdby',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| {
"content_hash": "107b7ae7967577bc306c986c4d5ba500",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 110,
"avg_line_length": 25.8,
"alnum_prop": 0.6608527131782945,
"repo_name": "snava10/sqlRunner",
"id": "a3bfe2cf118fd329a415ad74286e7c429497de83",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "websqlrunner/websqlrunner/migrations/0003_auto_20170726_1428.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "39"
},
{
"name": "CSS",
"bytes": "1067"
},
{
"name": "HTML",
"bytes": "7433"
},
{
"name": "JavaScript",
"bytes": "9418"
},
{
"name": "Python",
"bytes": "28572"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.mail import send_mass_mail
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from pigeon.notification import Notification
def build_emails(notification):
    """Yield one ``send_mass_mail`` datatuple per notification user.

    Each yielded item is [subject, message, from_email, [recipient]] as
    expected by django.core.mail.send_mass_mail.
    """
    context = {'site': notification.site}
    # PERF: the template context does not vary per user, so render once
    # instead of once per recipient. (Assumes the template output is
    # deterministic for a fixed context — confirm if templates ever use
    # time- or state-dependent tags.)
    message = render_to_string(notification.template_name, context)
    for user in notification.users:
        yield [
            notification.subject,
            message,
            settings.DEFAULT_FROM_EMAIL,
            [user.email],
        ]
def send_emails(notification):
    """Send the notification's per-user emails in one SMTP batch."""
    send_mass_mail(build_emails(notification))
class AccountInactiveNotification(Notification):
    """Email inactive users asking them to re-activate their account."""
    handlers = (send_emails,)  # delivered as one email per user
    template_name = 'user_deletion/email_notification.txt'
    subject = _('Re-activate your account')
class AccountDeletedNotification(Notification):
    """Email users to inform them their account has been deleted."""
    handlers = (send_emails,)  # delivered as one email per user
    template_name = 'user_deletion/email_deletion.txt'
    subject = _('Your account has been deleted')
| {
"content_hash": "bf8ba2625c182912b998ed446cf3f803",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 30.41176470588235,
"alnum_prop": 0.7079303675048356,
"repo_name": "incuna/django-user-deletion",
"id": "6f6ccb14a25ede7850baff8162f41e8a1921252e",
"size": "1034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_deletion/notifications.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "248"
},
{
"name": "Python",
"bytes": "11049"
}
],
"symlink_target": ""
} |
from ssc.api import Data
def test_data():
    """Values stored through Data's mapping interface read back unchanged."""
    values = [0, 1, 2, 3]
    container = Data()
    container['test_array'] = values
    assert container['test_array'] == values
| {
"content_hash": "3742b000114c910facc670708be71cf1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 20.75,
"alnum_prop": 0.5783132530120482,
"repo_name": "StationA/sscpy",
"id": "845c52da4cf936c4cf47e453c19019b663b3be6b",
"size": "166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "265"
},
{
"name": "Python",
"bytes": "10671"
}
],
"symlink_target": ""
} |
# Binary-search number guessing game (Python 2 syntax: `print` statements
# and raw_input).
print 'Please think of a number between 0 and 100!'
high = 100   # upper bound of the current search window
low = 0      # lower bound of the current search window
found = False
while found == False:
    # Guess the midpoint of the window; `/` on ints floors in Python 2.
    middle = (high + low) / 2
    print('Is your secret number ' + str(middle)+ '?')
    feedback = raw_input("Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. ")
    if feedback == 'h':
        # Guess too high: shrink the window from above.
        high = middle
    elif feedback == 'l':
        # Guess too low: shrink the window from below.
        low = middle
    elif feedback == 'c':
        found = True
    else:
        print 'Sorry, I did not understand your input.'
print('Game over. Your secret number was: ' + str(middle))
| {
"content_hash": "dc4004b0dc4141ea363852c3d92c1605",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 160,
"avg_line_length": 29.61904761904762,
"alnum_prop": 0.612540192926045,
"repo_name": "avontd2868/6.00.1x-1",
"id": "f4756e48a8e444d3f5b622000b85b503a995e9f1",
"size": "622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "l3-problem-9.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.template.defaultfilters import slugify
class Migration(DataMigration):
    """South data migration: populate Occupation.slug.

    Each slug is derived from '<sub_sector name> <occupation name>' via
    Django's slugify.
    """

    def forwards(self, orm):
        """Set a slug on every existing Occupation row."""
        # Note: Don't use "from appname.models import ModelName".
        # Use orm.ModelName to refer to models in this application,
        # and orm['appname.ModelName'] for models in other applications.
        for occupation in orm['admin.Occupation'].objects.all():
            occupation.slug = slugify(
                occupation.sub_sector.name + ' ' + occupation.name
            )
            occupation.save()

    def backwards(self, orm):
        """No-op: generated slugs are left in place on rollback."""

    # Frozen ORM state captured by South when this migration was written;
    # it is used to build the `orm` object passed to forwards/backwards.
    models = {
        'admin.company': {
            'Meta': {'object_name': 'Company'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'nasscom_membership_number': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '20'}),
            'training_provider': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '3'}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
        },
        'admin.institution': {
            'Meta': {'object_name': 'Institution'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'international': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
        },
        'admin.job': {
            'Meta': {'object_name': 'Job'},
            'company': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.Company']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_internship': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'job_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'job_roles': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.QualificationPack']"}),
            'job_title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
        },
        u'admin.logentry': {
            'Meta': {'ordering': "(u'-action_time',)", 'object_name': 'LogEntry', 'db_table': "u'django_admin_log'"},
            'action_flag': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'action_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'change_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'object_repr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        'admin.occupation': {
            'Meta': {'object_name': 'Occupation'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
            'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"})
        },
        'admin.occupationalstandard': {
            'Meta': {'unique_together': "(('code', 'version'),)", 'object_name': 'OccupationalStandard'},
            'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'db_index': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': 'None'}),
            'drafted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'knowledge': ('tinymce.models.HTMLField', [], {'default': 'None'}),
            'last_reviewed_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'next_review_on': ('django.db.models.fields.DateField', [], {}),
            'performace_criteria': ('tinymce.models.HTMLField', [], {'default': 'None'}),
            'scope': ('tinymce.models.HTMLField', [], {'default': 'None'}),
            'skills': ('tinymce.models.HTMLField', [], {'default': 'None'}),
            'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
            'title': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'db_index': 'True'})
        },
        'admin.qualificationpack': {
            'Meta': {'unique_together': "(('code', 'version'),)", 'object_name': 'QualificationPack'},
            'alias': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
            'code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '9', 'blank': 'True'}),
            'drafted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'experience': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'job_role': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
            'last_reviewed_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'level': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
            'max_educational_qualification': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'min_educational_qualification': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'next_jobs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['admin.QualificationPack']", 'null': 'True', 'blank': 'True'}),
            'next_review_on': ('django.db.models.fields.DateField', [], {}),
            'nveqf_level': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
            'occupation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.Occupation']"}),
            'os_compulsory': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'os_compulsory'", 'blank': 'True', 'to': "orm['admin.OccupationalStandard']"}),
            'os_optional': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'os_optional'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['admin.OccupationalStandard']"}),
            'role_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'tracks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['admin.Track']", 'null': 'True', 'blank': 'True'}),
            'training': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'})
        },
        'admin.sector': {
            'Meta': {'object_name': 'Sector', 'index_together': "[['name']]"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '9', 'db_index': 'True'})
        },
        'admin.subsector': {
            'Meta': {'unique_together': "(('sector', 'name'),)", 'object_name': 'SubSector', 'index_together': "[['name', 'sector']]"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
            'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Sector']"})
        },
        'admin.track': {
            'Meta': {'object_name': 'Track'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['admin']
    symmetrical = True
| {
"content_hash": "91a265ad0fd5bdf481b375b62e06e2be",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 218,
"avg_line_length": 78.87272727272727,
"alnum_prop": 0.5510988166589826,
"repo_name": "arpitprogressive/arpittest",
"id": "34515f90d8b722a0035f9bb8cdde0e933fc0894a",
"size": "13038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/admin/migrations/0006_add_slugs_4_occupation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133532"
},
{
"name": "JavaScript",
"bytes": "227983"
},
{
"name": "Python",
"bytes": "782274"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
} |
"""
Handle logging of the application stuff to the database
This will be replaced by something outside the app at some point, so the realy
code should all be in /lib/applogging.py vs in here. This is only the db store
side
"""
from datetime import datetime
from datetime import timedelta
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import Unicode
from sqlalchemy import UnicodeText
from ~~~PROJNAME~~~.models import Base
from ~~~PROJNAME~~~.models import DBSession
class AppLogMgr(object):
    """Facade for writing and querying AppLog rows."""

    @staticmethod
    def store(**kwargs):
        """Store a new log record to the db."""
        stored = AppLog(**kwargs)
        DBSession.add(stored)

    @staticmethod
    def find(days=1, message_filter=None, status=None):
        """Find a set of app log records based on predefined filters.

        :param days: only records newer than this many days are returned
        :param message_filter: case-insensitive substring match on message
        :param status: exact status value to match, if given
        """
        qry = AppLog.query
        if status is not None:
            qry = qry.filter(AppLog.status == status)
        if message_filter:
            # Lower-case the pattern as well: the column is lower-cased via
            # func.lower(), so a mixed-case filter could never match before.
            mfilter = '%{0}%'.format(message_filter.lower())
            qry = qry.filter(func.lower(AppLog.message).like(mfilter))
        now = datetime.utcnow()
        limit = now - timedelta(days=days)
        qry = qry.filter(AppLog.tstamp > limit)
        return qry.order_by(AppLog.tstamp.desc()).all()
class AppLog(Base):
    """SQLAlchemy model backing the application log store (table ``logging``)."""
    __tablename__ = 'logging'

    # Surrogate primary key.
    id = Column(Integer, autoincrement=True, primary_key=True)
    # Identifier of the actor that triggered the event.
    user = Column(Unicode(255), nullable=False)
    # Application component that emitted the record.
    component = Column(Unicode(50), nullable=False)
    # Short status/severity tag; AppLogMgr.find filters on exact match.
    status = Column(Unicode(10), nullable=False)
    # Human-readable message; searched case-insensitively by AppLogMgr.find.
    message = Column(Unicode(255), nullable=False)
    # Optional free-form extra data.
    payload = Column(UnicodeText)
    # Creation time in UTC (default applied at insert).
    tstamp = Column(DateTime, default=datetime.utcnow)
| {
"content_hash": "bf1027d7bc36c426d5b60dec21863345",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 78,
"avg_line_length": 30.54385964912281,
"alnum_prop": 0.684663986214819,
"repo_name": "mazz/kifu",
"id": "dcc9a674bd5645100a61b12854b0c03b201e30e7",
"size": "1741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/applog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "120340"
},
{
"name": "JavaScript",
"bytes": "40445"
},
{
"name": "Mako",
"bytes": "44114"
},
{
"name": "Python",
"bytes": "113340"
}
],
"symlink_target": ""
} |
import pychrono as chrono
import pychrono.vehicle as veh
import pychrono.irrlicht as irr
def _setup_hmmwv(sys, init_loc, steer_sign):
    """Create, place and initialize one reduced-model HMMWV plus its driver.

    :param sys: the ChSystemNSC the vehicle is added to
    :param init_loc: ChVectorD initial chassis location
    :param steer_sign: 1.0 or -1.0; flips the sign of the scripted steering
        inputs so the two vehicles perform mirrored maneuvers
    :return: (vehicle, driver) tuple, both initialized
    """
    hmmwv = veh.HMMWV_Reduced(sys)
    hmmwv.SetInitPosition(chrono.ChCoordsysD(init_loc, chrono.ChQuaternionD(1, 0, 0, 0)))
    hmmwv.SetPowertrainType(veh.PowertrainModelType_SIMPLE)
    hmmwv.SetDriveType(veh.DrivelineTypeWV_RWD)
    hmmwv.SetTireType(veh.TireModelType_RIGID)
    hmmwv.Initialize()

    hmmwv.SetChassisVisualizationType(veh.VisualizationType_PRIMITIVES)
    hmmwv.SetSuspensionVisualizationType(veh.VisualizationType_PRIMITIVES)
    hmmwv.SetSteeringVisualizationType(veh.VisualizationType_PRIMITIVES)
    hmmwv.SetWheelVisualizationType(veh.VisualizationType_NONE)
    hmmwv.SetTireVisualizationType(veh.VisualizationType_PRIMITIVES)

    # Open-loop scripted driver: entries are (time, steering, throttle, braking).
    # Hold still, then ramp steering/throttle in, then ease off the throttle.
    driver_data = veh.vector_Entry([
        veh.DataDriverEntry(0.0, 0.0, 0.0, 0.0),
        veh.DataDriverEntry(0.5, 0.0, 0.0, 0.0),
        veh.DataDriverEntry(0.7, steer_sign * 0.3, 0.7, 0.0),
        veh.DataDriverEntry(1.0, steer_sign * 0.3, 0.7, 0.0),
        veh.DataDriverEntry(3.0, steer_sign * 0.5, 0.1, 0.0),
    ])
    driver = veh.ChDataDriver(hmmwv.GetVehicle(), driver_data)
    driver.Initialize()
    return hmmwv, driver


def main():
    """Run the two-HMMWV demo: rigid terrain, mirrored scripted drivers, Irrlicht view."""
    #print("Copyright (c) 2017 projectchrono.org\nChrono version: ", CHRONO_VERSION , "\n\n")

    step_size = 0.005  # integration/driver/render step [s]

    sys = chrono.ChSystemNSC()
    sys.Set_G_acc(chrono.ChVectorD(0, 0, -9.81))
    sys.SetSolverType(chrono.ChSolver.Type_BARZILAIBORWEIN)
    sys.SetSolverMaxIterations(150)
    sys.SetMaxPenetrationRecoverySpeed(4.0)

    # Create the terrain: one flat 200 x 100 patch with a tiled texture.
    terrain = veh.RigidTerrain(sys)
    patch_mat = chrono.ChMaterialSurfaceNSC()
    patch_mat.SetFriction(0.9)
    patch_mat.SetRestitution(0.01)
    patch = terrain.AddPatch(patch_mat, chrono.ChVectorD(0, 0, 0), chrono.ChVectorD(0, 0, 1), 200, 100)
    patch.SetColor(chrono.ChColor(0.8, 0.8, 0.5))
    patch.SetTexture(veh.GetDataFile("terrain/textures/tile4.jpg"), 200, 200)
    terrain.Initialize()

    # Create and initialize the two vehicles (identical setups except for
    # initial position and mirrored steering — see _setup_hmmwv).
    hmmwv_1, driver_1 = _setup_hmmwv(sys, chrono.ChVectorD(0, -1.5, 1.0), 1.0)
    hmmwv_2, driver_2 = _setup_hmmwv(sys, chrono.ChVectorD(7, 1.5, 1.0), -1.0)

    # Create the vehicle Irrlicht interface; the chase camera follows vehicle 1.
    app = veh.ChWheeledVehicleIrrApp(hmmwv_1.GetVehicle(), 'Two Car Demo', irr.dimension2du(1000, 800))
    app.SetSkyBox()
    app.AddTypicalLights(irr.vector3df(30, -30, 100), irr.vector3df(30, 50, 100), 250, 130)
    app.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
    app.SetChaseCamera(chrono.ChVectorD(0.0, 0.0, 0.75), 6.0, 0.5)
    app.SetChaseCameraState(veh.ChChaseCamera.Track)
    app.SetChaseCameraPosition(chrono.ChVectorD(-15, 0, 2.0))
    app.SetTimestep(step_size)
    app.AssetBindAll()
    app.AssetUpdateAll()

    # Simulation loop
    realtime_timer = chrono.ChRealtimeStepTimer()
    while (app.GetDevice().run()):
        time = hmmwv_1.GetSystem().GetChTime()

        app.BeginScene(True, True, irr.SColor(255, 140, 161, 192))
        app.DrawAll()
        app.EndScene()

        # Get driver inputs
        driver_inputs_1 = driver_1.GetInputs()
        driver_inputs_2 = driver_2.GetInputs()

        # Update modules (process inputs from other modules).
        # NOTE(review): the Irrlicht HUD only mirrors vehicle 1's inputs.
        driver_1.Synchronize(time)
        driver_2.Synchronize(time)
        hmmwv_1.Synchronize(time, driver_inputs_1, terrain)
        hmmwv_2.Synchronize(time, driver_inputs_2, terrain)
        terrain.Synchronize(time)
        app.Synchronize("", driver_inputs_1)

        # Advance simulation for one timestep for all modules.
        driver_1.Advance(step_size)
        driver_2.Advance(step_size)
        hmmwv_1.Advance(step_size)
        hmmwv_2.Advance(step_size)
        terrain.Advance(step_size)
        app.Advance(step_size)

        # Advance state of the entire system (containing both vehicles).
        sys.DoStepDynamics(step_size)

        # Spin in place for real time to catch up.
        realtime_timer.Spin(step_size)

    return 0
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
veh.SetDataPath(chrono.GetChronoDataPath() + 'vehicle/')

# Guard the entry point: running the demo as a script behaves exactly as
# before, but importing it as a module no longer launches the simulation.
if __name__ == '__main__':
    main()
"content_hash": "36f443d88eded039900056ae8bd29409",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 113,
"avg_line_length": 43.78125,
"alnum_prop": 0.663633119200571,
"repo_name": "Milad-Rakhsha/chrono",
"id": "af8ad5a1080e13d4be6b123bece5395f5d64c8ed",
"size": "6315",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/demos/python/vehicle/demo_VEH_TwoCars.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1834298"
},
{
"name": "C++",
"bytes": "39649020"
},
{
"name": "CMake",
"bytes": "254971"
},
{
"name": "CSS",
"bytes": "9560"
},
{
"name": "Cuda",
"bytes": "2979"
},
{
"name": "GLSL",
"bytes": "4214"
},
{
"name": "HTML",
"bytes": "2856"
},
{
"name": "Inno Setup",
"bytes": "47881"
},
{
"name": "JavaScript",
"bytes": "4942"
},
{
"name": "Logos",
"bytes": "15488"
},
{
"name": "MATLAB",
"bytes": "6957"
},
{
"name": "Makefile",
"bytes": "2254"
},
{
"name": "Objective-C",
"bytes": "40334"
},
{
"name": "Python",
"bytes": "189183"
}
],
"symlink_target": ""
} |
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
import time
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_jsonrpc,
connect_nodes_bi,
p2p_port,
start_nodes,
)
class NetTest(OakcoinTestFramework):
    """Functional test of the net-related RPCs on a fresh two-node network."""

    def __init__(self):
        super().__init__()
        # Start from an empty chain so the byte/connection counters below
        # are deterministic.
        self.setup_clean_chain = True
        self.num_nodes = 2

    def run_test(self):
        # Order matters: later checks assume the initial two-way topology
        # built by the framework and modified by earlier sub-tests.
        self._test_connection_count()
        self._test_getnettotals()
        self._test_getnetworkinginfo()
        self._test_getaddednodeinfo()

    def _test_connection_count(self):
        # connect_nodes_bi connects each node to the other, so node0 sees
        # one inbound and one outbound connection.
        assert_equal(self.nodes[0].getconnectioncount(), 2)

    def _test_getnettotals(self):
        # check that getnettotals totalbytesrecv and totalbytessent
        # are consistent with getpeerinfo
        peer_info = self.nodes[0].getpeerinfo()
        assert_equal(len(peer_info), 2)
        net_totals = self.nodes[0].getnettotals()
        assert_equal(sum([peer['bytesrecv'] for peer in peer_info]),
                     net_totals['totalbytesrecv'])
        assert_equal(sum([peer['bytessent'] for peer in peer_info]),
                     net_totals['totalbytessent'])
        # test getnettotals and getpeerinfo by doing a ping;
        # the bytes sent/received should change.
        # note: ping and pong messages are 32 bytes each
        self.nodes[0].ping()
        # Short pause to let the ping/pong round-trip complete.
        time.sleep(0.1)
        peer_info_after_ping = self.nodes[0].getpeerinfo()
        net_totals_after_ping = self.nodes[0].getnettotals()
        for before, after in zip(peer_info, peer_info_after_ping):
            assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong'])
            assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping'])
        # Two peers -> two ping/pong exchanges of 32 bytes each.
        assert_equal(net_totals['totalbytesrecv'] + 32*2, net_totals_after_ping['totalbytesrecv'])
        assert_equal(net_totals['totalbytessent'] + 32*2, net_totals_after_ping['totalbytessent'])

    def _test_getnetworkinginfo(self):
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)

        self.nodes[0].setnetworkactive(False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        timeout = 3
        while self.nodes[0].getnetworkinfo()['connections'] != 0:
            # Wait a bit for all sockets to close
            assert timeout > 0, 'not all connections closed in time'
            timeout -= 0.1
            time.sleep(0.1)

        # Re-enable networking and restore the original topology.
        self.nodes[0].setnetworkactive(True)
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)

    def _test_getaddednodeinfo(self):
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(ip_port, 'add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that a non-existent node returns an error
        assert_raises_jsonrpc(-24, "Node has not been added",
                              self.nodes[0].getaddednodeinfo, '1.1.1.1')
# Allow running this test file directly.
if __name__ == '__main__':
    NetTest().main()
| {
"content_hash": "1495cc9601a19eb39c7aa55d7a5a321f",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 102,
"avg_line_length": 40,
"alnum_prop": 0.6252777777777778,
"repo_name": "stratton-oakcoin/oakcoin",
"id": "eb538abf8713a37ba30eac00fb8b53d30c6fb52f",
"size": "3809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/net.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "694076"
},
{
"name": "C++",
"bytes": "5098228"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "51512"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "190257"
},
{
"name": "Makefile",
"bytes": "112101"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1152477"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "53022"
}
],
"symlink_target": ""
} |
"""
config
:copyright: 2016 by raptor.zh@gmail.com.
"""
#from __future__ import unicode_literals
import sys
# True when running under Python 3; used by the encode/decode compat helpers.
# NOTE(review): string comparison on sys.version is fragile — confirm it is
# never run on a hypothetical major version where lexicographic order breaks.
PY3=sys.version>"3"

from os.path import dirname, abspath, expanduser, join as joinpath
import json
import logging

logger = logging.getLogger(__name__)

# Baseline settings; reload_config() overlays values from config.json on top.
# The *_KEY/*_SECRET/*_TOKEN entries are OAuth credentials left blank here and
# presumably supplied via config.json — confirm against deployment docs.
config_default = {
    "CLIENT_KEY": "",
    "CLIENT_SECRET": "",
    "ACCESS_TOKEN": "",
    "ACCESS_SECRET": "",
    "PROXY": "",
    "FANFOU_HTTPS": True,
    "web_addr": "localhost",   # address the local web server binds to
    "web_port": 8880,          # port the local web server listens on
    "debug": True,
}
def get_fullname(*args):
    """Return an absolute path under the directory containing this module.

    With no arguments, return the module directory itself.
    """
    root = dirname(abspath(__file__))
    if not args:
        return root
    return joinpath(root, joinpath(*args))
def uniencode(s, coding="utf-8"):
    """Encode a text string to bytes; pass through falsy values and bytes.

    Equivalent to the old PY3-flag logic on Python 2 (str is bytes there),
    and fixes Python 3, where the old ``PY3 or ...`` condition was always
    true and crashed with AttributeError on bytes input.
    """
    return s.encode(coding) if s and not isinstance(s, bytes) else s
def unidecode(s, coding="utf-8"):
    """Decode a byte string to text; pass through falsy and already-text values.

    The original called ``unicode()``, a NameError on Python 3, and on
    Python 2 decoded unconditionally (TypeError on unicode input); testing
    ``isinstance(s, bytes)`` is correct on both versions because Python 2's
    str IS bytes and ``bytes.decode`` matches ``unicode(s, coding)``.
    """
    return s.decode(coding) if s and isinstance(s, bytes) else s
def reload_config():
    """Return the default settings overlaid with values from config.json.

    A missing config.json is silently ignored; a malformed one propagates
    the parse error to the caller.
    """
    merged = dict(config_default)
    try:
        with open(get_fullname("config.json"), "r") as fp:
            merged.update(json.load(fp))
    except IOError:
        pass
    return merged
| {
"content_hash": "0712f0d6e31fc6f9b313cbf38589363e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 21.21153846153846,
"alnum_prop": 0.614687216681777,
"repo_name": "raptorz/pyfan",
"id": "4ac229b7b7a9fa7e646b2a5dbc870284f6a48179",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28174"
}
],
"symlink_target": ""
} |
import os, sys
# Make the project's lib/ directory (a sibling of this script's bin/ dir)
# importable before the lacuna imports below are attempted.
bindir = os.path.abspath(os.path.dirname(sys.argv[0]))
libdir = bindir + "/../lib"
sys.path.append(libdir)
import logging
import lacuna
import lacuna.exceptions as err
import lacuna.binutils.libtrain_spies as lib
# Script body: for every planet, locate its Intelligence Ministry and queue
# spy training at any training building that still has capacity.
ts = lib.TrainSpies()
l = ts.client.user_logger

for p in ts.planets:
    ### Set the current planet
    ts.set_planet( p )
    l.info( "Working on {}.".format(ts.planet.name) )

    ### Get that planet's Int Min. Skip to the next planet if there's no int
    ### min.
    l.info( "Finding Int Min and locating spies on {}.".format(ts.planet.name) )
    try:
        ts.set_intmin()
        l.debug( "Got an int min on {}.".format(ts.planet.name) )
    except err.NoSuchBuildingError as e:
        l.info( "You don't have an Intelligence Ministry on {}. Skipping.".format(ts.planet.name) )
        continue

    ### Train spies at each location that reported available capacity.
    for pname, loc in ts.locations.items():
        if loc:
            ts.train_spies_at( pname, loc )
        else:
            l.info( "Any training buildings you have on {} are currently full.".format(pname) )
| {
"content_hash": "3bb54836d5c4d2295fca24e4ea078cf0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 100,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.6312849162011173,
"repo_name": "tmtowtdi/MontyLacuna",
"id": "824c0fe65ad127898d96f999485a64dc8ec8feb1",
"size": "1094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/train_spies.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36255146"
},
{
"name": "Shell",
"bytes": "2766"
}
],
"symlink_target": ""
} |
import unittest
from irrexplorer import utils
class TestClassification(unittest.TestCase):
    """Verify classifySearchString maps raw query strings to the right types."""

    def test_classification(self):
        # assertEquals is a long-deprecated alias of assertEqual (removed in
        # Python 3.12); use the canonical name.
        a = utils.classifySearchString('10.0.0.1')
        self.assertEqual(type(a), utils.Prefix)

        a = utils.classifySearchString('1.3.4.0/24')
        self.assertEqual(type(a), utils.Prefix)

        a = utils.classifySearchString('AS2603')
        self.assertEqual(type(a), utils.ASNumber)

        a = utils.classifySearchString('AS-NTT')
        self.assertEqual(type(a), utils.ASMacro)

        a = utils.classifySearchString('AS-57344')
        self.assertEqual(type(a), utils.ASMacro)

        a = utils.classifySearchString('AS9498:AS-BHARTI-IN')
        self.assertEqual(type(a), utils.ASMacro)
def main():
    # Delegate to unittest's CLI runner, which discovers the test case above.
    unittest.main()

if __name__ == '__main__':
    main()
| {
"content_hash": "f390af0c0b0d83771f609597ca9c5eac",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 61,
"avg_line_length": 22.35135135135135,
"alnum_prop": 0.6469165659008465,
"repo_name": "job/irrexplorer",
"id": "b8eef2cb6d69309c4a50ac3cedfb6a2e3f9d23dc",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "6168"
},
{
"name": "JavaScript",
"bytes": "4977"
},
{
"name": "PLpgSQL",
"bytes": "4253"
},
{
"name": "Python",
"bytes": "67185"
},
{
"name": "Shell",
"bytes": "2921"
}
],
"symlink_target": ""
} |
from distutils.core import setup, Extension

# NOTE(review): distutils is deprecated and removed in Python 3.12; switching
# to setuptools is a drop-in change here, left as-is to avoid assuming the
# package is installed.

# C extension module built from a single source file.
module = Extension("spammodule",
                   sources=["spammodule.c"])

setup(
    name="spammodule",
    version="1.0",
    description="spam package!",
    ext_modules=[module],
)
| {
"content_hash": "4d6e85813176aa2c142dd4ca20e80365",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.6666666666666666,
"repo_name": "benrbray/matey",
"id": "dc57b55b784dad8a57636f7514df3d36a3009fad",
"size": "219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notes/extending-python-c/spammodule/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23536"
},
{
"name": "C++",
"bytes": "748"
},
{
"name": "Jupyter Notebook",
"bytes": "9980"
},
{
"name": "Makefile",
"bytes": "262"
},
{
"name": "Python",
"bytes": "6762"
}
],
"symlink_target": ""
} |
import cv2

# Parenthesized print of a single argument behaves identically on Python 2
# (where it is a print statement with a parenthesized expression) and
# Python 3, where the old bare statements were a SyntaxError.
print('test')
print('123')
"content_hash": "be2569313476ff7fdf302e8386fcfe78",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 12,
"avg_line_length": 11.666666666666666,
"alnum_prop": 0.7428571428571429,
"repo_name": "GO-HACKATHON/QuantumSigmoid",
"id": "63f91078578b3613fa39be9650292c6f4f0b9145",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frcms/fr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "46787"
},
{
"name": "CSS",
"bytes": "151372"
},
{
"name": "HTML",
"bytes": "288310"
},
{
"name": "JavaScript",
"bytes": "677105"
},
{
"name": "Jupyter Notebook",
"bytes": "1978996"
},
{
"name": "PHP",
"bytes": "7001312"
},
{
"name": "Python",
"bytes": "4099"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
} |
"""
Django settings for testproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import django
# Probe for the optional django-guardian package; has_guardian drives the
# conditional INSTALLED_APPS entry below.
try:
    import guardian
    has_guardian = True
except ImportError:
    has_guardian = False
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9vg3q-kbo(p^zpom4!*o8*%tfu-14o=3++txo+sxwto)2@=qd='

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    # Uncomment for testing templates, and after a `pip install django-bootstrap3`
    # 'bootstrap3',
    # App test
    'groups_manager',
    'testproject',
)
# The optional object-permission app is only registered when importable.
if has_guardian:
    INSTALLED_APPS += ('guardian',)
MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Older Django versions read MIDDLEWARE_CLASSES; alias for compatibility.
MIDDLEWARE_CLASSES = MIDDLEWARE
# django-guardian required settings
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)
if has_guardian:
    # Register the object-permission backend only when guardian imported
    # successfully, mirroring the conditional INSTALLED_APPS entry above.
    # The previous unconditional entry broke authentication whenever
    # guardian was not installed.
    AUTHENTICATION_BACKENDS += ('guardian.backends.ObjectPermissionBackend',)
ANONYMOUS_USER_ID = -1
ROOT_URLCONF = 'testproject.urls'

WSGI_APPLICATION = 'testproject.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

SESSION_COOKIE_NAME = "testproject"
# Reuse the admin login view instead of a dedicated login page.
LOGIN_URL = '/admin/login/'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
            ]
        },
    },
]

# Keep implicit primary keys on AutoField (pre-Django-3.2 behavior).
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Uncomment to test application settings
"""
GROUPS_MANAGER = {
'AUTH_MODELS_SYNC': True,
'AUTH_MODELS_GET_OR_CREATE': False,
'GROUP_NAME_PREFIX': '',
'GROUP_NAME_SUFFIX': '',
'USER_USERNAME_PREFIX': '',
'USER_USERNAME_SUFFIX': '',
'PERMISSIONS': {
'owner': ['view', 'change', 'delete'],
'group': ['view', 'change'],
'groups_upstream': ['view'],
'groups_downstream': [],
'groups_siblings': ['view'],
},
}
"""
# Uncomment to test slugify function with awesome-slugify
"""
from slugify import slugify
GROUPS_MANAGER = {
'SLUGIFY_FUNCTION': lambda s: slugify(s, to_lower=True),
'SLUGIFY_USERNAME_FUNCTION': lambda s: slugify(s, to_lower=True, separator="_")
}
"""
| {
"content_hash": "bbebdba85dda6f131fd280f537345229",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 83,
"avg_line_length": 23.950617283950617,
"alnum_prop": 0.6724226804123712,
"repo_name": "vittoriozamboni/django-groups-manager",
"id": "7aeed53a2aa2abd2a187392c030d0b253196132b",
"size": "3880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testproject/testproject/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3376"
},
{
"name": "HTML",
"bytes": "65993"
},
{
"name": "Python",
"bytes": "151823"
},
{
"name": "SCSS",
"bytes": "126"
}
],
"symlink_target": ""
} |
import subprocess
import urllib
'''
download train and test images from S3 to local drive
'''
# Settings: text files listing one image id per line.
files = ["./data/X_small_test.txt","./data/X_small_train.txt"]

# Collect every image id from both list files, then fetch each PNG from S3
# into ./data/images/. (Python 2 script: print statement, xrange, URLopener.)
filenames = []
for file in files:
    with open(file,'r') as f:
        filenames += f.readlines()

for i in xrange(len(filenames)):
    file = filenames[i].replace('\n','').strip()
    print 'Getting file %i of %i: %s' % (i+1,len(filenames),file)
    # NOTE(review): the URLopener could be created once outside the loop.
    imgopen = urllib.URLopener()
    imgopen.retrieve('https://s3.amazonaws.com/eds-uga-csci8360/data/project3/images/%s.png' % file, "./data/images/%s.png" % file)
| {
"content_hash": "5c928537dbea1871fa445761067542f4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 131,
"avg_line_length": 28.761904761904763,
"alnum_prop": 0.6539735099337748,
"repo_name": "unisar/CIFARClassification",
"id": "fb888df0e5ddde7922a4dde80abd9f931b95cb87",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prepro/aws_download_pics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "20000"
},
{
"name": "Java",
"bytes": "16834"
},
{
"name": "Python",
"bytes": "138600"
}
],
"symlink_target": ""
} |
import os
def prefix_envvar(envvar):
    """Return the environment variable name namespaced with the ODOOKU_ prefix."""
    return 'ODOOKU_{0}'.format(envvar)


def get_envvar(envvar, default=None):
    """Look up the ODOOKU_-prefixed environment variable, or *default* if unset."""
    name = prefix_envvar(envvar)
    value = os.environ.get(name)
    return default if value is None else value
| {
"content_hash": "8ef4dead074528a2426ee14d35efc112",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 57,
"avg_line_length": 18.77777777777778,
"alnum_prop": 0.7100591715976331,
"repo_name": "adaptivdesign/odooku-compat",
"id": "5399b079883bd519a08e2f25ec096fc0fb56c9ca",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/10.0",
"path": "odooku/helpers/env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "6320"
},
{
"name": "Python",
"bytes": "97440"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import six
from dashboard.api import api_request_handler
from dashboard.pinpoint.models import change
from dashboard.common import utils
if utils.IsRunningFlask():
  # Flask build: expose the handler as a decorated module-level function.
  from flask import request

  def _CheckUser():
    # No authorization required to resolve a commit.
    pass

  # TODO(https://crbug.com/1262292): raise directly after Python2 trybots retire.
  # pylint: disable=inconsistent-return-statements
  @api_request_handler.RequestHandlerDecoratorFactory(_CheckUser)
  def CommitHandlerPost():
    """Resolve a repository/git_hash request pair to a commit dict.

    Defaults: repository='chromium', git_hash='HEAD'. Unknown hashes are
    reported as BadRequestError (chained from the underlying KeyError).
    """
    repository = utils.SanitizeArgs(
        args=request.args, key_name='repository', default='chromium')
    git_hash = utils.SanitizeArgs(
        args=request.args, key_name='git_hash', default='HEAD')
    try:
      c = change.Commit.FromDict({
          'repository': repository,
          'git_hash': git_hash,
      })
      return c.AsDict()
    except KeyError as e:
      six.raise_from(
          api_request_handler.BadRequestError('Unknown git hash: %s' %
                                              git_hash), e)
else:
  # Legacy webapp2 build: same behavior implemented as a handler class.
  class Commit(api_request_handler.ApiRequestHandler):
    # pylint: disable=abstract-method

    def _CheckUser(self):
      # No authorization required to resolve a commit.
      pass

    # TODO(https://crbug.com/1262292): raise directly after Python2 trybots retire.
    # pylint: disable=inconsistent-return-statements
    def Post(self, *args, **kwargs):
      """Resolve repository/git_hash request params to a commit dict."""
      del args, kwargs  # Unused.
      repository = self.request.get('repository', 'chromium')
      git_hash = self.request.get('git_hash')
      try:
        c = change.Commit.FromDict({
            'repository': repository,
            'git_hash': git_hash,
        })
        return c.AsDict()
      except KeyError as e:
        six.raise_from(
            api_request_handler.BadRequestError('Unknown git hash: %s' %
                                                git_hash), e)
| {
"content_hash": "6ff8813d567ab45dec1cddcdbfae8f4f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 83,
"avg_line_length": 33.14035087719298,
"alnum_prop": 0.6326098464796188,
"repo_name": "catapult-project/catapult",
"id": "324ef032820e1b1d4058ccfc39cdb7a9ae0bbe70",
"size": "2052",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "dashboard/dashboard/pinpoint/handlers/commit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
import collections
import random
import types
import itertools
from yaql.context import EvalArg, ContextAware
from yaql.exceptions import YaqlExecutionException
from yaql.utils import limit
def join(self, others, join_predicate, composer):
    """Inner-join two deferred sequences.

    Lazily yields composer(left, right) for every pair drawn from self()
    and others() that satisfies join_predicate. Note: others() is
    re-evaluated once per item of self().
    """
    for left in self():
        for right in others():
            if join_predicate(left, right):
                yield composer(left, right)
def select(collection, composer):
    """Lazily map *composer* over the items produced by the deferred
    iterable *collection* (collection is not evaluated until iteration)."""
    for element in collection():
        yield composer(element)
def _sum(this):
return sum(this())
def _range_limited(start, end):
for i in xrange(int(start()), int(end())):
yield i
def _range_infinite(start):
for i in itertools.count(start()):
yield i
def rand():
    # Uniform pseudo-random float in [0.0, 1.0), straight from the stdlib.
    return random.random()
@EvalArg('self', collections.Iterable)
def take_while(self, predicate):
    """Yield leading items of *self* for as long as *predicate* holds,
    stopping at the first item that fails it."""
    for element in self:
        if not predicate(element):
            return
        yield element
@EvalArg('self', types.GeneratorType)
def _list(self):
    # Materialize the generator through yaql.utils.limit — presumably caps
    # the number of items consumed to guard against infinite sequences
    # (confirm against yaql.utils).
    return limit(self)
@ContextAware()
@EvalArg('levels', types.IntType)
def parent(context, levels, func):
    """Evaluate *func* against data found *levels* data-bearing contexts up
    the context chain; return None when the chain is exhausted first.

    NOTE(review): only contexts with truthy data are counted toward
    *levels*, and the current context's data is overwritten with the
    ancestor's before func() runs — confirm this mutation is intended.
    """
    con = context
    traversed = 0
    while con:
        if con.data:
            traversed += 1
        if traversed > levels:
            break
        con = con.parent_context
    if con:
        context.data = con.data
    else:
        return None
    return func()
@ContextAware()
def direct_parent(context, func):
    # Convenience wrapper: evaluate func one data-bearing level up.
    return parent(context, 1, func)
@ContextAware()
def _as(self, context, *tuples):
    """Evaluate *self*, bind each (value, name) pair produced by *tuples*
    into the context under the given name, and return the evaluated data."""
    data = self()
    for make_pair in tuples:
        pair = make_pair(data)
        # Index rather than unpack so over-long tuples keep the original's
        # lenient behavior.
        context.set_data(pair[0], pair[1])
    return data
@ContextAware()
def root(context):
    """Return the '$' entry from the topmost ancestor context that carries
    non-empty data, falling back to the current context's data."""
    # Iterative walk replacing the original recursion: overwriting on every
    # truthy ancestor means the topmost one wins, exactly as before.
    found = context.data
    ancestor = context.parent_context
    while ancestor:
        if ancestor.data:
            found = ancestor.data
        ancestor = ancestor.parent_context
    return found.get('$')
def switch(self, *conditions):
    """Return the value of the first (predicate, value) tuple whose first
    element is truthy when evaluated against self(); None if none match.

    Raises YaqlExecutionException when a condition does not produce a
    2-tuple.
    """
    self = self()
    for cond in conditions:
        res = cond(self)
        # `tuple` instead of types.TupleType: the two are the same object on
        # Python 2, and TupleType no longer exists on Python 3.
        if not isinstance(res, tuple):
            raise YaqlExecutionException("Switch must have tuple parameters")
        if len(res) != 2:
            raise YaqlExecutionException("Switch tuples must be of size 2")
        if res[0]:
            return res[1]
    return None
def add_to_context(context):
    """Register the extended function set on the given yaql context.

    Registration order is preserved (both 'range' variants share a name and
    are dispatched by arity).
    """
    registrations = (
        (join, 'join'),
        (select, 'select'),
        (_sum, 'sum'),
        (_range_infinite, 'range'),
        (_range_limited, 'range'),
        (rand, 'random'),
        (_list, 'list'),
        (take_while, 'takeWhile'),
        (root, 'root'),
        (_as, 'as'),
        (switch, 'switch'),
    )
    for function, name in registrations:
        context.register_function(function, name)
| {
"content_hash": "d1641372f5c7609a8727d9daca7c33f4",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 77,
"avg_line_length": 24.311475409836067,
"alnum_prop": 0.6318273769386379,
"repo_name": "istalker2/yaql",
"id": "25ef8b02758a00e9a77a85c75671ea9c1615bd67",
"size": "3580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yaql/functions/extended.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "49493"
}
],
"symlink_target": ""
} |
import json
from django.db import models
from django.db.models.loading import get_model
from django.core.urlresolvers import reverse
from myuser.models import MyUser
class RVU(models.Model):
    """
    Relative value-units; tracks the total RVU values based on Medicare
    reimbursement schedules. May change annually.
    """
    year = models.PositiveSmallIntegerField()
    code = models.CharField(max_length=10)   # procedure code — presumably CPT/HCPCS, confirm
    mod = models.CharField(max_length=4)     # code modifier
    description = models.TextField()
    # The three RVU components summed by get_total_rvu().
    work = models.FloatField()
    facility = models.FloatField()
    malpractice = models.FloatField()

    class Meta:
        verbose_name = 'Physician Fee Schedule Relative Value'
        unique_together = ('year', 'code', 'mod')

    def __unicode__(self):
        return "{0}{1}: {2}".format(self.code, self.mod, self.description)

    def get_total_rvu(self):
        """Unadjusted sum of the three RVU components."""
        return self.work + self.facility + self.malpractice

    def total_adjusted_payment(self, user):
        """Dollar payment for this code adjusted by the user's profile.

        Reads the 'gpci_*' keys and 'multiplier' from the user's JSON
        profile and scales by the user's Medicare conversion factor.
        """
        profile = user.get_json_profile(json_encode=False)
        facility = profile["gpci_facility"]
        work = profile["gpci_work"]
        malpractice = profile["gpci_malpractice"]
        multiplier = profile["multiplier"]
        total_rvu = (self.work*work +
                     self.facility*facility +
                     self.malpractice*malpractice)
        return multiplier*total_rvu*user.get_medicare_factor()
class GPCI(models.Model):
    """
    Geographic pricing cost index; adjustments made to RVU by location.
    """
    year = models.PositiveSmallIntegerField()
    location = models.CharField(max_length=128)
    # Per-component multipliers applied to the matching RVU components.
    work = models.FloatField()
    facility = models.FloatField()
    malpractice = models.FloatField()

    class Meta:
        verbose_name = 'Geographic Price Cost Index'
        unique_together = ('year', 'location')

    def __unicode__(self):
        return self.location

    def get_json(self):
        """Return this row's fields as a dict with 'gpci_'-prefixed keys —
        the key format RVU.total_adjusted_payment reads, presumably routed
        through the user profile (confirm)."""
        d = {}
        fields = ('year', 'location', 'work', 'facility', 'malpractice')
        for field in fields:
            d['gpci_' + field] = getattr(self, field)
        return d

    @classmethod
    def get_default(cls):
        """
        Return the default location modification, named National by default.
        """
        # None is returned both when the 'National' row is missing and when
        # (unexpectedly) more than one exists.
        nationals = cls.objects.filter(location='National')
        if nationals.count()==1:
            return nationals[0]
        else:
            return None
class MedicareFactor(models.Model):
    """
    Dollars per RVU, adjusted annually.
    """
    year = models.PositiveSmallIntegerField(unique=True)
    value = models.FloatField()

    def __unicode__(self):
        # __unicode__ must return text; the original returned the raw int,
        # which makes Django's string coercion raise TypeError.
        return u'{0}'.format(self.year)
class OrderSet(models.Model):
    """
    Collection of multiple RVU which are commonly ordered together. May
    optionally have a user associated with this collection.
    """
    user = models.ForeignKey(MyUser, blank=True, null=True)
    name = models.CharField(max_length=250)
    RVUs = models.ManyToManyField(RVU)

    class Meta:
        verbose_name = 'Order Set'

    def __unicode__(self):
        return self.name

    def get_rvu_ids_json(self):
        """JSON-encoded list of the primary keys of the contained RVUs."""
        return json.dumps(list(self.RVUs.values_list('pk', flat=True)))

    def get_total_rvu(self, user):
        """Sum of the unadjusted RVU totals, scaled by the user's multiplier."""
        profile = user.get_json_profile(json_encode=False)
        multiplier = profile["multiplier"]
        total = 0
        for rvu in self.RVUs.all():
            total += multiplier*rvu.get_total_rvu()
        return total

    def get_estimated_cost(self, user):
        """Sum of the GPCI-adjusted payments for every RVU in the set."""
        total = 0
        for rvu in self.RVUs.all():
            total += rvu.total_adjusted_payment(user)
        return total

    def get_add_tests_url(self):
        return reverse('user:add_orderset_test_performed', kwargs={'pk': self.pk})

    def order_tests(self, user, month):
        """Bulk-create one TestPerformed row per RVU for the given month."""
        tests = []
        # get_model defers resolving myuser.TestPerformed until call time —
        # presumably to avoid a circular import (confirm).
        TestPerformed = get_model('myuser', 'TestPerformed')
        for rvu in self.RVUs.all():
            tests.append(TestPerformed(user=user,
                                       rvu=rvu,
                                       month=month))
        TestPerformed.objects.bulk_create(tests)

    def get_absolute_url(self):
        return reverse('user:order_sets_detail', kwargs={'pk': self.pk})

    def get_delete_url(self):
        return reverse('costs:orderset_delete', kwargs={'pk': self.pk})
| {
"content_hash": "df64b9eab66510fa893ba59f91a660b6",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 82,
"avg_line_length": 30.7,
"alnum_prop": 0.6191251744997673,
"repo_name": "shapiromatron/comp523-medcosts",
"id": "bd551e66d76687dd276afc606596101f11ad3c5e",
"size": "4298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "costs/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1582"
},
{
"name": "JavaScript",
"bytes": "55555"
},
{
"name": "Python",
"bytes": "105330"
}
],
"symlink_target": ""
} |
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice

# Smoke test: install the app, open its main activity, type an e-mail
# address, and save a screenshot for inspection.

# Wait for a connected device/emulator and get a handle on it.
device = MonkeyRunner.waitForConnection()

# Install the APK under test.  installPackage returns a boolean, so the
# result could be checked for success.
device.installPackage('../app/target/hk-ust-gmission-1.0.apk')

# Launch the carousel activity via its fully-qualified component name.
app_package = 'hk.ust.gmission'
launch_activity = 'hk.ust.gmission.ui.CarouselActivity'
device.startActivity(component=app_package + '/' + launch_activity)

# Give the activity time to come up, then type a test e-mail address.
MonkeyRunner.sleep(5)
device.type('example@example.com')

# Capture the screen and write it to disk.
snapshot = device.takeSnapshot()
snapshot.writeToFile('screenshot.png','png')
| {
"content_hash": "9e3b759e84786ba31cb41d1495fe5f1d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 90,
"avg_line_length": 29.9,
"alnum_prop": 0.778149386845039,
"repo_name": "gmission/gmission_reborn_android",
"id": "1823e7ed8d8dc176994f9e51fac240fb93c3d21c",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "integration-tests/monkeyrunnerTestSuite.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "309505"
},
{
"name": "Python",
"bytes": "953"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class VirtualNetworkGatewaySku(Model):
    """SKU (name, tier, capacity) of a virtual network gateway.

    :param name: Gateway SKU name; one of 'Basic', 'HighPerformance',
     'Standard' or 'UltraPerformance'. Required.
    :type name: str or
     ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewaySkuName
    :param tier: Gateway SKU tier; same set of values as ``name``. Required.
    :type tier: str or
     ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewaySkuTier
    :param capacity: The capacity.
    :type capacity: int
    """

    # msrest validation rules: these attributes must be present.
    _validation = {
        'name': {'required': True},
        'tier': {'required': True},
    }

    # Wire-format (JSON) key and serialized type for each attribute.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(self, name, tier, capacity=None):
        super(VirtualNetworkGatewaySku, self).__init__()
        self.name = name
        self.tier = tier
        self.capacity = capacity
| {
"content_hash": "430b1a051a3639bf86cf1d3479fa7914",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 35.55555555555556,
"alnum_prop": 0.6359375,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "d458232bd0abc81d87f7410748d85d56b10a5ceb",
"size": "1754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/virtual_network_gateway_sku.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Ecs20140526DescribeInstanceAttributeRequest(RestApi):
    """Aliyun ECS DescribeInstanceAttribute request (API version 2014-05-26)."""

    def __init__(self, domain='ecs.aliyuncs.com', port=80):
        # Delegate endpoint configuration to the shared RestApi base class.
        RestApi.__init__(self, domain, port)
        # Id of the ECS instance to describe; must be set before sending.
        self.InstanceId = None

    def getapiname(self):
        return 'ecs.aliyuncs.com.DescribeInstanceAttribute.2014-05-26'
| {
"content_hash": "9040745fa5470df8792a5d8b55110841",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 32.36363636363637,
"alnum_prop": 0.7331460674157303,
"repo_name": "wanghe4096/website",
"id": "1bb4476ded0c67ce41b50d6e6e1e6daa0886c0ef",
"size": "356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aliyun/api/rest/Ecs20140526DescribeInstanceAttributeRequest.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "121965"
},
{
"name": "HTML",
"bytes": "163477"
},
{
"name": "JavaScript",
"bytes": "227130"
},
{
"name": "Lua",
"bytes": "5653"
},
{
"name": "Python",
"bytes": "325945"
},
{
"name": "Shell",
"bytes": "1359"
}
],
"symlink_target": ""
} |
"""This is the users module.
"""
from flask import Blueprint

# Blueprint grouping all user-facing routes; serves its own assets from
# this package's static/ and templates/ directories.
users = Blueprint('users', __name__, static_folder='static', template_folder='templates')

# NOTE(review): imported after the blueprint is created, presumably so the
# views module can import `users` without a circular import -- confirm
# against app.users.views.
from app.users.views import *
| {
"content_hash": "8617a96539522b695bf7b7f5819aa39b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 89,
"avg_line_length": 23,
"alnum_prop": 0.7119565217391305,
"repo_name": "flowsha/zhwh",
"id": "206dd349cca6e80a2862f5f74097ea814e487dbd",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_root/app/users/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5190563"
},
{
"name": "HTML",
"bytes": "717242"
},
{
"name": "JavaScript",
"bytes": "39872256"
},
{
"name": "Python",
"bytes": "267106"
},
{
"name": "Ruby",
"bytes": "2412"
}
],
"symlink_target": ""
} |
from glance.hacking import checks
from glance.tests import utils
class HackingTestCase(utils.BaseTestCase):
    """Exercise glance's custom flake8/hacking checks against sample lines."""

    @staticmethod
    def _violations(check_gen):
        # Each custom check yields one item per violation it finds.
        return len(list(check_gen))

    def test_assert_true_instance(self):
        bad = ("self.assertTrue(isinstance(e, "
               "exception.BuildAbortException))")
        self.assertEqual(1, self._violations(checks.assert_true_instance(bad)))
        self.assertEqual(0, self._violations(
            checks.assert_true_instance("self.assertTrue()")))

    def test_assert_equal_type(self):
        bad = "self.assertEqual(type(als['QuicAssist']), list)"
        self.assertEqual(1, self._violations(checks.assert_equal_type(bad)))
        self.assertEqual(0, self._violations(
            checks.assert_equal_type("self.assertTrue()")))

    def test_assert_equal_none(self):
        self.assertEqual(1, self._violations(
            checks.assert_equal_none("self.assertEqual(A, None)")))
        self.assertEqual(1, self._violations(
            checks.assert_equal_none("self.assertEqual(None, A)")))
        self.assertEqual(0, self._violations(
            checks.assert_equal_none("self.assertIsNone()")))

    def test_no_translate_debug_logs(self):
        self.assertEqual(1, self._violations(checks.no_translate_debug_logs(
            "LOG.debug(_('foo'))", "glance/store/foo.py")))
        self.assertEqual(0, self._violations(checks.no_translate_debug_logs(
            "LOG.debug('foo')", "glance/store/foo.py")))
        self.assertEqual(0, self._violations(checks.no_translate_debug_logs(
            "LOG.info(_('foo'))", "glance/store/foo.py")))
| {
"content_hash": "fbf7a983dd258aebcb952e114b4e8392",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 75,
"avg_line_length": 37.743589743589745,
"alnum_prop": 0.623641304347826,
"repo_name": "tanglei528/glance",
"id": "dbad0f6e55432bf5abc93df5a09029dee2af9600",
"size": "2077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/test_hacking.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3193082"
},
{
"name": "Shell",
"bytes": "7168"
}
],
"symlink_target": ""
} |
from sys import version_info
# SWIG-generated bootstrap: load the compiled extension module that backs
# this proxy module.  On Python >= 2.6 the extension is looked up next to
# this file first; otherwise a plain import is used.
if version_info >= (2,6,0):
    def swig_import_helper():
        # Locate the compiled _param_SimpleIntLink extension beside this
        # module; fall back to a normal import if find_module fails.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_param_SimpleIntLink', [dirname(__file__)])
        except ImportError:
            import _param_SimpleIntLink
            return _param_SimpleIntLink
        if fp is not None:
            try:
                _mod = imp.load_module('_param_SimpleIntLink', fp, pathname, description)
            finally:
                # Always close the file handle opened by find_module.
                fp.close()
            return _mod
    _param_SimpleIntLink = swig_import_helper()
    del swig_import_helper
else:
    import _param_SimpleIntLink
del version_info
# Alias the builtin `property` for use by the generated proxy classes;
# tolerated to be missing on ancient interpreters.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Set attribute `name` on a SWIG proxy, honouring SWIG's rules.

    'thisown' and 'this' are forwarded to the underlying pointer object;
    class-level setter hooks in __swig_setmethods__ take precedence; with
    static=1 only pre-existing attributes may be assigned.
    """
    if name == "thisown":
        return self.this.own(value)
    if name == "this" and type(value).__name__ == 'SwigPyObject':
        self.__dict__[name] = value
        return
    setter = class_type.__swig_setmethods__.get(name, None)
    if setter:
        return setter(self, value)
    if not static or hasattr(self, name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Non-static variant of _swig_setattr_nondynamic (new attrs allowed)."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """Look up `name` through SWIG's class-level getter table."""
    if name == "thisown":
        return self.this.own()
    getter = class_type.__swig_getmethods__.get(name, None)
    if getter:
        return getter(self)
    raise AttributeError(name)
def _swig_repr(self):
    """repr() for SWIG proxies: include the wrapped pointer when available."""
    try:
        inner = "proxy of " + self.this.__repr__()
    except:
        inner = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, inner,)
# Feature probe for very old interpreters: new-style classes exist iff the
# `object` builtin does; _newclass records which case we are in.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    """Wrap setter `set` so it refuses to create brand-new attributes."""
    def set_attr(self, name, value):
        if name == "thisown":
            return self.this.own(value)
        if hasattr(self, name) or name == "this":
            set(self, name, value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
import param_BasicIntLink
import param_BasicRouter
import param_SimObject
import param_BasicLink
class SimpleIntLink(param_BasicIntLink.BasicIntLink):
    # SWIG proxy for the C++ SimpleIntLink param struct; instances are only
    # created from the compiled extension, so the Python constructor is
    # disabled below.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
# Register the proxy class with the compiled SWIG module.
SimpleIntLink_swigregister = _param_SimpleIntLink.SimpleIntLink_swigregister
SimpleIntLink_swigregister(SimpleIntLink)
class SimpleIntLinkParams(param_BasicIntLink.BasicIntLinkParams):
    # SWIG proxy for the SimpleIntLink params struct; create() builds the
    # corresponding C++ object from these params.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def create(self): return _param_SimpleIntLink.SimpleIntLinkParams_create(self)
    def __init__(self):
        # Wrap a freshly allocated C++ params object in this proxy.
        this = _param_SimpleIntLink.new_SimpleIntLinkParams()
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _param_SimpleIntLink.delete_SimpleIntLinkParams
    __del__ = lambda self : None;
# Register the proxy class with the compiled SWIG module.
SimpleIntLinkParams_swigregister = _param_SimpleIntLink.SimpleIntLinkParams_swigregister
SimpleIntLinkParams_swigregister(SimpleIntLinkParams)
| {
"content_hash": "3ce1e998eca5f4b84c396e4688d6c0fe",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 107,
"avg_line_length": 35.98969072164948,
"alnum_prop": 0.6545402463477513,
"repo_name": "silkyar/570_Big_Little",
"id": "9fd28afa96945d9c04f3ccc77d31c9c7477eeb77",
"size": "3695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/ARM/python/m5/internal/param_SimpleIntLink.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "232078"
},
{
"name": "C",
"bytes": "887097"
},
{
"name": "C++",
"bytes": "52497889"
},
{
"name": "D",
"bytes": "13736198"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "JavaScript",
"bytes": "78818"
},
{
"name": "Perl",
"bytes": "13199821"
},
{
"name": "Prolog",
"bytes": "977139"
},
{
"name": "Python",
"bytes": "3831426"
},
{
"name": "Ruby",
"bytes": "19404"
},
{
"name": "Scilab",
"bytes": "14370"
},
{
"name": "Shell",
"bytes": "16704"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XML",
"bytes": "16048"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.