repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
veloutin/papas | etc/papas/settings.py | Python | agpl-3.0 | 4,958 | 0.004639 | # Django settings for apmanager project.
import os
from django.utils.translation import ugettext_lazy as _
DEBUG = True
USE_DAEMON = True
if os.environ.get("USE_DEV_PATHS", None):
DEV_PATHS = True
else:
DEV_PATHS = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = 'papas.sqlite' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Montreal'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGES = (
('fr', _("French")),
('en', _("English")),
)
LANGUAGE_CODE = 'fr_CA'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
if DEV_PATHS:
UPLOAD_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"uploads",
)
)
else:
UPLOAD_ROOT = '/var/lib/apmanager/uploads/'
#Site prefix to add to relative urls, such as apmanager/ for a site on example.com/apmanager/
# Leave blank if installed on web root
LOGIN_URL = "/accounts/login/"
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
if DEV_PATHS:
MEDIA_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"..", "..", "apmanager", 'templates','site-media'),
)
else:
MEDIA_ROOT = "/usr/share/apmanager/templates/site-media"
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = "/site-media/"
| # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = "/media/"
# Make this unique, and don't share it with anybody.
SECRET_KEY = ')@1wt()$4x&&e9c#n&viv-g#k20(p!_ga)s$+4i!*hbdcid$)s'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesy | stem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'apmanager.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.abspath(os.path.join(MEDIA_ROOT, "..")),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'apmanager.accesspoints',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
)
if DEV_PATHS:
WATCH_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__),"..","..","watch"),
)
else:
WATCH_DIR='/var/lib/apmanager/watch'
COMMAND_WATCH_DIR = WATCH_DIR + '/commands'
AP_DIR = WATCH_DIR + '/ap'
AP_REFRESH_WATCH_DIR = AP_DIR + '/refresh'
AP_INIT_WATCH_DIR = AP_DIR + '/init'
LOCALE_PATHS = (
'/usr/share/apmanager/locale',
)
if DEV_PATHS:
LOCALE_PATHS = LOCALE_PATHS + (
os.path.join(os.path.dirname(__file__), "..", "..", "locale"),
)
TEMPLATE_DIRS = TEMPLATE_DIRS + (
os.path.join(os.path.dirname(__file__), "..", "..", "apmanager"),
)
for dpath in (
UPLOAD_ROOT,
WATCH_DIR,
AP_DIR,
AP_REFRESH_WATCH_DIR,
AP_INIT_WATCH_DIR,
COMMAND_WATCH_DIR,
):
if not os.path.isdir(dpath): os.mkdir(dpath)
|
kamyu104/LeetCode | Python/accounts-merge.py | Python | mit | 3,714 | 0 | # Time: O(nlogn), n is the number of total emails,
# and the max length ofemail is 320, p.s. {64}@{255}
# Space: O(n)
# Given a list accounts, each element accounts[i] is a list of strings,
# where the first element accounts[i][0] is a name,
# and the rest of the elements are emails representing emails of the account.
#
# Now, we would like to merge these accounts.
# Two accounts definitely belong to the same person if there is some email
# that is common to both accounts.
# Note that even if two accounts have the same name,
# they may belong to different people as people could have the same name.
# A person can have any number of accounts initially, but all of their
# accounts definitely have the same name.
#
# After merging the accounts, return the accounts in the following format:
# the first element of each account is the name, and the rest of the elements
# are emails in sorted order.
# The accounts themselves can be returned in any order.
#
# Example 1:
# Input:
# accounts = [["John", "johnsmith@mail.com", "john00@mail.com"],
# ["John", "johnnybravo@mail.com"],
# ["John", "johnsmith@mail.com", "john_newyork@mail.com"],
# ["Mary", "mary@mail.com"]]
# Output: [["John", 'john00@mail.com', 'john_newyork@mail.com',
# 'johnsmith@mail.com'],
# ["John", "johnnybravo@mail.com"], ["Mary", "mary@mail.com"]]
#
# Explanation:
# The first and third John's are the same person as they have the common
# email "johnsmith@mail.com".
# The second John and Mary are different people as none of their email
# addresses are used by other accounts.
# We could return these lists in any order,
# for example the answer [['Mary', 'mary@mail.com'],
# ['John', 'johnnybravo@mail.com'],
# ['John', 'john00@mail.com', 'john_newyork@mail.com',
# 'johnsmith@mail.com']]
# would still be accepted.
#
# Note:
#
# The length of accounts will be in the range [1, 1000].
# The length of accounts[i] will be in the range [1, 10].
# The length of accounts[i][j] will be in the range [1, 30].
import collections
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class UnionFind(object):
def __init__(self):
self.set = []
def get_id(self):
self.set.append(len(self.set))
return len(self.set)-1
def f | ind_set(self, x):
if self.set[x] != x:
self.set[x] = self.find_set(self.set[x]) # path compression.
return self.set[x]
def union_set(self, x, y):
x_root, y_root = map(self.find_set, (x, y))
if x_root != y_root:
self.set[min(x_root, y_root)] = max(x_root, y_root)
class Solution(object):
def accountsMerge(self, accounts):
"""
:type accounts: List[List[str]]
:rtype: List[List[str]]
| """
union_find = UnionFind()
email_to_name = {}
email_to_id = {}
for account in accounts:
name = account[0]
for i in xrange(1, len(account)):
if account[i] not in email_to_id:
email_to_name[account[i]] = name
email_to_id[account[i]] = union_find.get_id()
union_find.union_set(email_to_id[account[1]],
email_to_id[account[i]])
result = collections.defaultdict(list)
for email in email_to_name.keys():
result[union_find.find_set(email_to_id[email])].append(email)
for emails in result.values():
emails.sort()
return [[email_to_name[emails[0]]] + emails
for emails in result.values()]
|
lechat/jenkinsflow | test/cause_test.py | Python | bsd-3-clause | 966 | 0.006211 | # Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import api_select
# TODO: Actually test that cause is set
def test_cause_no_build_number(api_type, env_job_name):
with api_select.api(__file__, api_type) as api:
api.flow_job()
api.job('j1', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=1) as ctrl1:
ctrl1.invoke('j1')
def test_cause(api_type, env_job_name, env_build | _number):
with api_select.api(__fi | le__, api_type) as api:
api.flow_job()
api.job('j1', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=1) as ctrl1:
ctrl1.invoke('j1')
|
dspinellis/effective-debugging | logging/Python/eventlog.py | Python | apache-2.0 | 220 | 0.004545 | import logging;
logger = logging.getLogger('myapp')
# Send | log messa | ges to myapp.log
fh = logging.FileHandler('myapp.log')
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)
logger.debug('In main module')
|
danbradham/hotline | hotline/widgets.py | Python | mit | 14,920 | 0.000268 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from collections import deque
from contextlib import contextmanager
from hotline.utils import event_loop
from hotline.anim import *
from hotline.vendor.Qt import QtWidgets, QtCore, QtGui
class ActiveScreen(object):
@staticmethod
def active():
desktop = QtWidgets.QApplication.instance().desktop()
active = desktop.screenNumber(desktop.cursor().pos())
return desktop.screenGeometry(active)
@classmethod
def top(cls):
rect = cls.active()
return int(rect.width() * 0.5), 0
@classmethod
def center(cls):
return cls.active().center().toTuple()
class CommandList(QtWidgets.QListWidget):
# TODO add support for icons
def __init__(self, items, lineedit, parent=None):
super(CommandList, self).__init__(parent)
self.setWindowFlags(
QtCore.Qt.Window |
QtCore.Qt.FramelessWindowHint
)
self.setAttribute(QtCore.Qt.WA_ShowWithoutActivating)
self.setMinimumSize(1, 1)
self.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored
)
self.parent = parent
self.lineedit = lineedit
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setFocusPolicy(QtCore.Qt.NoFocus)
self.itemSelectionChanged.connect(self.parent.activateWindow)
self.items = items
@property
def items(self):
return (self.item(i) for i in range(self.count()))
@items.setter
def items(self, value):
self.clear()
self.addItems(value)
self.setGeometry(self._get_geometry())
def visible_count(self):
count = 0
for item in self.items:
if not item.isHidden():
count += 1
return count
def select_next(self):
row = self.currentRow()
while True:
row += 1
if row > self.count() - 1:
return
if self.item(row).isHidden():
continue
self.setCurrentRow(row)
return
def select_prev(self):
row = self.currentRow()
while True:
row -= 1
if row < 0:
self.setCurrentRow(row)
return
if self.item(row).isHidden():
continue
self.setCurrentRow(row)
return
def is_match(self, letters, item):
letters = deque(letters.lower())
l = letters.popleft()
for char in item.lower():
if char == l:
try:
l = letters.popleft()
except IndexError:
return True
return False
def filter(self, text):
text = text.strip(' ')
if not text:
for item in self.items:
item.setHidden(False)
self.setCurrentRow(-1)
else:
best_match = -1
for i, item in enumerate(self.items):
match = self.is_match(text, item.text())
if match and best_match < 0:
best_match = i
elif text == item.text():
best_match = i
item.setHidden(not match)
self.setCurrentRow(best_match)
self.setGeometry(self._get_geometry())
def _get_geometry(self):
visible_count = self.visible_count()
if not visible_count:
return QtCore.QRect(-1, -1, 0, 0)
r = self.parent.rect()
pos = self.parent.mapToGlobal(QtCore.QPoint(r.right(), r.bottom()))
width = self.parent.width()
left = pos.x() - width + 1
top = pos.y() - 2
height = self.parent._height * min(visible_count, 5)
return QtCore.QRect(left, top, width, height)
def show(self):
self.setCurrentRow(-1)
super(CommandList, self).show()
class Console(QtWidgets.QDialog):
def __init__(self, parent=None):
super(Console, self).__init__(parent)
self.setWindowTitle('Hotline Console')
self.setAttribute(QtCore.Qt.WA_ShowWithoutActivating)
self.parent = parent
self.output = QtWidgets.QTextEdit(self)
self.output.setReadOnly(True)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.output)
self.setLayout(self.layout)
def write(self, message):
self.output.moveCursor(QtGui.QTextCursor.End)
self.output.insertPlainText(message)
self.output.moveCursor(QtGui.QTextCursor.End)
self.output.repaint()
def show(self):
if self.isVisible():
return
r = self.parent.rect()
pos = self.parent.mapToGlobal(QtCore.QPoint(r.right(), r.bottom()))
width = r.width()
left = pos.x() - width + 1
top = pos.y() + 72 * 6
height = width
self.setGeometry(QtCore.QRect(left, top, width, height))
super(Console, self).show()
class InputField(QtWidgets.QLineEdit):
focusOut = QtCore.Signal(object)
def __init__(self, placeholder=None, parent=None):
super(InputField, self).__init__(parent=parent)
self.parent = parent
if placeholder:
self.setPlaceholderText(placeholder)
def focusOutEvent(self, event):
event.accept()
self.focusOut.emit(event)
def keyPressEvent(self, event):
# sometimes focus is a little sticky
if not self.isVisible():
self.clearFocus()
event.reject()
return
enter_pressed = event.key() == QtCore.Qt.Key_Enter
if enter_pressed and not self.text():
event.accept()
return
return super(InputField, self).keyPressEvent(event)
class Dialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super(Dialog, self).__init__(parent)
self.parent = parent
self._set_sizes()
self._alt_f4_pressed = False
self._animation = 'slide'
self._position = 'center'
self.pinned = | False
self.setWindowTitle('Hotline')
| self.setWindowFlags(
QtCore.Qt.Window |
QtCore.Qt.FramelessWindowHint
)
self.setMinimumSize(1, 1)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.mode_button = QtWidgets.QPushButton(self)
self.mode_button.setMinimumWidth(self._height)
self.mode_button.setSizePolicy(
QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Minimum,
)
self.mode_button.setFocusPolicy(QtCore.Qt.NoFocus)
self.mode_button.hide()
self.input_field = InputField(parent=self)
self.input_field.setSizePolicy(
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding,
)
self.commandlist = CommandList([], self.input_field, self)
self.commandlist.itemClicked.connect(self.accept)
self.input_field.textChanged.connect(self.commandlist.filter)
self.input_field.focusOut.connect(self.reject)
self._wrapper = QtWidgets.QWidget(parent=self)
self._wrapper.setObjectName('Hotline')
self.layout = QtWidgets.QHBoxLayout(self._wrapper)
self.layout.setAlignment(
QtCore.Qt.AlignLeft |
QtCore.Qt.AlignVCenter
)
self.layout.setContentsMargins(2, 2, 2, 2)
self.layout.setSpacing(0)
self.layout.addWidget(self.mode_button)
self.layout.addWidget(self.input_field)
self._layout = QtWidgets.QHBoxLayout(self)
self._layout.setContentsMargins(0, 0, 0, 0)
self._layout.setSpacing(0)
self._layout.addWidget(self._wrapper)
self._wrapper.setLayout(self.layout)
self.setLayout(self._layout)
self.console = Console(self)
self.hk_up = QtWidgets.QShortcut(self)
self.hk_up.setKey('Up')
self.hk_up.activated.connect(self.commandlist.select_prev)
self.hk_dn = QtWidgets.QShortcut(self)
self.hk_dn.setKey('Down')
|
atul-bhouraskar/django | tests/admin_views/admin.py | Python | bsd-3-clause | 36,442 | 0.000494 | import datetime
from io import StringIO
from wsgiref.util import FileWrapper
from django import forms
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, JsonResponse, StreamingHttpResponse
from django.urls import path
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.views.decorators.common import no_append_slash
from .forms import MediaActionForm
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Answer2, Article, BarAccount, Book,
Bookmark, Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice,
City, Collector, Color, Color2, ComplexSortedPerson, CoverLetter,
CustomArticle, CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
InlineReference, InlineReferer, Inquisition, Language, Link,
MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
OtherStory, Paper, Parent, ParentWithDependentChildren, ParentWithUUIDPK,
Person, Persona, Picture, Pizza, Plot, PlotDetails, PlotProxy,
PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question,
ReadablePizza, ReadOnlyPizza, ReadOnlyRelatedField, Recipe, Recommendation,
Recommender, ReferencedByGenRel, ReferencedByInline, ReferencedByParent,
RelatedPrepopulated, RelatedWithUUIDPKModel, Report, Reservation,
Restaurant, RowLevelChangePermissionModel, Section, ShortMessage, Simple,
Sketch, Song, State, Story, StumpJoke, Subscriber, SuperVillain, Telegram,
Thing, Topping, UnchangeableObject, UndeletableObject, UnorderedObject,
UserMessenger, UserProxy, Villain, Vodcast, Whatsit, Widget, Worker,
WorkHour,
)
@admin.display(ordering='date')
def callable_year(dt_value):
try:
return dt_value.year
except AttributeError:
return None
class ArticleInline(admin.TabularInline):
model = Article
fk_name = 'section'
prepopulated_fields = {
'title': ('content',)
}
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = (
'chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',
'guest_author__promo__book',
)
class ArticleForm(forms.ModelForm):
extra_form_field = forms.BooleanField(required=False)
class Meta:
fields = '__all__'
model = Article
class ArticleAdminWithExtraUrl(admin.ModelAdmin):
def get_urls(self):
urlpatterns = super().get_urls()
urlpatterns.append(
path('extra.json', self.admin_site.admin_view(self.extra_json), name='article_extra_json')
)
return urlpatterns
def extra_json(self, request):
return JsonResponse({})
class ArticleAdmin(ArticleAdminWithExtraUrl):
list_display = (
'content', 'date', callable_year, 'model_year', 'modeladmin_year',
'model_year_reversed', 'section', lambda obj: obj.title,
'order_by_expression', 'model_property_year', 'model_month',
'order_by_f_expression', 'order_by_orderby_expression',
)
list_editable = ('section',)
list_filter = ('date', 'section')
autocomplete_fields = ('section',)
view_on_site = False
form = ArticleForm
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content', 'extra_form_field'),
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section', 'sub_section')
})
)
# These orderings aren't particularly useful but show that expressions can
# be used for admin_order_field.
@admin.display(ordering=models.F('date') + datetime.timedelta(days=3))
def order_by_expression(self, obj):
return obj.model_year
@admin.display(ordering=models.F('date'))
def order_by_f_expression(self, obj):
return obj.model_year
@admin.display(ordering=models.F('date').asc(nulls_last=True))
def order_by_orderby_expression(self, obj):
return obj.model_year
def changelist_view(self, request):
return super().changelist_view(request, extra_context={'extra_var': 'Hello!'})
@admin.display(ordering='date', description=None)
def modeladmin_year(self, obj):
return obj.date.year
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'from@example.com',
['to@example.com']
).send()
return super().delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'from@example.com',
['to@example.com']
).send()
return super().save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
def has_view_permission(self, request, obj=None):
"""Only allow viewing objects if id is a multiple of 3."""
return request.user.is_staff and obj is not None and obj.id % 3 == 0
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
popup_response_template = 'custom_admin/popup_response.html'
def changelist_view(self, request):
return super().changelist_view(request, extra_context={'extr | a_var': 'Hello!'})
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color', 'color__warm', 'color__value', 'pub_date')
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected', 'sketch')
@admin.display
def sketch(self, obj):
# A method with the same name as a reverse accessor.
return 'list-displ | ay-sketch'
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
def clean(self):
for person_dict in self.cleaned_data:
person = person_dict.get('id')
alive = person_dict.get('alive')
if person an |
Kaniabi/ben10 | source/python/ben10/foundation/print_detailed_traceback.py | Python | lgpl-2.1 | 6,374 | 0.006119 | from __future__ import unicode_literals
import locale
import sys
#===================================================================================================
# _StreamWrapper
#===================================================================================================
class _StreamWrapper(object):
'''
A simple wrapper to decode bytes into unicode objects before writing to an unicode-only stream.
'''
def __init__(self, stream, encoding):
self.stream = stream
self.encoding = encoding
def write(self, value):
self.stream.write(value.decode(self.encoding))
#===================================================================================================
# PrintDetailedTraceback
#===================================================================================================
def PrintDetailedTraceback(exc_info=None, stream=None, max_levels=None, max_line_width=120, omit_locals=False):
'''
Prints a more detailed traceback than Python's original one showing, for each frame, also the
locals at that frame and their values.
:type exc_info: (type, exception, traceback)
:param exc_info:
The type of the exception, the exception instance and the traceback object to print. Exactly
what is returned by sys.exc_info(), which is used if this param is not given.
:type stream: file-like object
:param stream:
File like object to print the traceback to. Note that the traceback will be written directly
as unicode to the stream, unless the stream is either sys.stderr or sys.stdout. If no stream
is passed, sys.stderr is used.
:param int max_levels:
The maximum levels up in the traceback to display. If None, print all levels.
:param int max_line_width:
The maximum line width for each line displaying a local variable in a stack frame. Each
line displaying a local variable will be truncated at the middle to avoid cluttering
the display;
:param bool omit_locals:
If true it will omit function arguments and local variables from traceback. It is an option
especially interesting if an error during a function may expose sensitive data, like an user
private information as a password. Defaults to false as most cases won't be interested in
this feature.
'''
from ben10.foundation.exceptions import ExceptionToUnicode
from ben10.foundation.klass import IsInstance
import StringIO
import cStringIO
import io
import locale
assert not IsInstance(stream, (StringIO.StringIO, cStringIO.OutputType)), 'Old-style StringIO passed to PrintDetailedTraceback()'
# For sys.stderr and sys.stdout, we should encode the unicode objects before writing.
def _WriteToEncodedStream(message):
assert type(message) is unicode
encoding = getattr(stream, 'encoding', None)
stream.write(message.encode(encoding or 'utf-8', errors='replace'))
def _WriteToUnicodeStream(message):
assert type(message) is unicode
stream.write(message)
if stream is None:
stream = sys.stderr
if stream in (sys.stderr, sys.stdout):
_WriteToStream = _WriteToEncodedStream
wrapped_stream = stream
else:
_WriteToStream = _WriteToUnicodeStream
encoding = locale.getpreferredencoding()
wrapped_stream = _StreamWrapper(stream, encoding)
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exception, tb = exc_info
if exc_type is None or tb is None:
# if no exception is given, or no traceback is available, let the print_exception deal
# with it. Since our stream deals with unicode, wrap it
import traceback
traceback.print_exception(exc_type, exception, tb, max_levels, wrapped_stream)
return
# find the bottom node of the traceback
while True:
if not tb.tb_next:
break
tb = tb.tb_next
# obtain the stack frames, up to max_levels
stack = []
f = tb.tb_frame
levels = 0
while f:
stack.append(f)
f = f.f_back
levels += 1
if max_levels is not None and levels >= max_levels:
break
stack.reverse()
_WriteToStream('Traceback (most recent call last):\n')
encoding = locale.getpreferredencoding()
for frame in stack:
params = dict(
| name=frame.f_code.co_name.decode(encoding),
filename=frame.f_code.co_filename.decode(encoding),
lineno=frame.f_lineno,
)
_WriteToStream(' File "%(filename)s", line %(lineno)d, in %(name)s\n' % params)
try:
lines = io.open(frame.f_code.co_filename).readlines()
line = lines[frame.f_lineno - 1]
except:
| pass # don't show the line source
else:
_WriteToStream(' %s\n' % line.strip())
if not omit_locals:
# string used to truncate string representations of objects that exceed the maximum
# line size
trunc_str = '...'
for key, value in sorted(frame.f_locals.iteritems()):
ss = ' %s = ' % key
# be careful to don't generate any exception while trying to get the string
# representation of the value at the stack, because raising an exception here
# would shadow the original exception
try:
val_repr = repr(value).decode(encoding)
except:
val_repr = '<ERROR WHILE PRINTING VALUE>'
else:
# if the val_pre exceeds the maximium size, we truncate it in the middle
# using trunc_str, showing the start and the end of the string:
# "[1, 2, 3, 4, 5, 6, 7, 8, 9]" -> "[1, 2, ...8, 9]"
if len(ss) + len(val_repr) > max_line_width:
space = max_line_width - len(ss) - len(trunc_str)
middle = int(space / 2)
val_repr = val_repr[:middle] + trunc_str + val_repr[-(middle + len(trunc_str)):]
_WriteToStream(ss + val_repr + '\n')
message = ExceptionToUnicode(exception)
_WriteToStream('%s: %s\n' % (exc_type.__name__, message))
|
cpsaltis/pythogram-core | src/gramcore/filters/morphology.py | Python | mit | 3,722 | 0 | """Morphological filters.
The following work on 2D arrays. They wrap the relevant scipy functions. The
ones provided by skimage are not well documented for now.
"""
from scipy.ndimage import morphology
def closing(parameters):
"""Calculates morphological closing of a greyscale image.
This is equal to performing a dilation and then an erosion.
It wraps `scipy.ndimage.morphology.grey_closing`. The `footprint`,
`structure`, `output`, `mode`, `cval` and `origin` options are not
supported.
Keep in mind that `mode` and `cval` influence the results. In this case
the default mode is used, `reflect`.
:param parameters['data'][0]: input array
:type parameters['data'][0]: numpy.array
:param parameters['size']: which neighbours to take into account, defaults
to (3, 3) a.k.a. numpy.ones((3, 3))
:type parameters['size']: list
:return: numpy.array
"""
data = parameters['data'][0]
size = tuple(parameters['size'])
return morphology.grey_closing(data, size=size)
def erosion(parameters):
"""Erodes a greyscale image.
For the simple case of a full and flat structuring element, it can be
viewed as a minimum filter over a sliding window.
It wraps `scipy.ndimage.morphology.grey_erosion`. The `footprint`,
`structure`, `output`, `mode`, `cval` and `origin` options are not
supported.
Keep in mind that `mode` and `cval` influence the results. In this case
the default mode is used, `reflect`.
:param parameters['data'][0]: input array
:type parameters['data'][0]: numpy.array
:param parameters['size']: which neighbours to take into account, defaults
| to (3, 3) a.k.a. numpy.ones((3, 3))
:type parameters['size']: list
:return: numpy.array
"""
data = parameters['data'][0]
size = tuple(parameters['size'])
return morphology.grey_erosion(data, size=size)
def dilation(parameters):
"""Dilates a greyscale image.
For the simple case of a full and flat structuring element, it can be
viewed as a maximum filter o | ver a sliding window.
It wraps `scipy.ndimage.morphology.grey_dilation`. The `footprint`,
`structure`, `output`, `mode`, `cval` and `origin` options are not
supported.
Keep in mind that `mode` and `cval` influence the results. In this case
the default mode is used, `reflect`.
:param parameters['data'][0]: input array
:type parameters['data'][0]: numpy.array
:param parameters['size']: which neighbours to take into account, defaults
to (3, 3) a.k.a. numpy.ones((3, 3))
:type parameters['size']: list
:return: numpy.array
"""
data = parameters['data'][0]
size = tuple(parameters['size'])
return morphology.grey_dilation(data, size=size)
def opening(parameters):
    """Calculates morphological opening of a greyscale image.
    This is equal to performing an erosion and then a dilation.
    It wraps `scipy.ndimage.morphology.grey_opening`. The `footprint`,
    `structure`, `output`, `mode`, `cval` and `origin` options are not
    supported.
    Keep in mind that `mode` and `cval` influence the results. In this case
    the default mode is used, `reflect`.
    :param parameters['data'][0]: input array
    :type parameters['data'][0]: numpy.array
    :param parameters['size']: which neighbours to take into account, defaults
    to (3, 3) a.k.a. numpy.ones((3, 3))
    :type parameters['size']: list
    :return: numpy.array
    """
    data = parameters['data'][0]
    size = tuple(parameters['size'])
    return morphology.grey_opening(data, size=size)
|
srange/SU2 | SU2_PY/change_version_number.py | Python | lgpl-2.1 | 3,458 | 0.010411 | #!/usr/bin/env python
## \file change_version_number.py
# \brief Python script for updating the version number of the SU2 suite.
# \author A. Aranake
# \version 6.2.0 "Falcon"
#
# The current SU2 release has been coordinated by the
# SU2 International Developers Society <www.su2devsociety.org>
# with selected contributions from the open-source community.
#
# The main research teams contributing to the current release are:
# - Prof. Juan J. Alonso's group at Stanford University.
# - Prof. Piero Colonna's group at Delft University of Technology.
# - Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# - Prof. Alberto Guardone's group at Polytechnic University of Milan.
# - Prof. Rafael Palacios' group at Imperial College London.
# - Prof. Vincent Terrapon's group at the University of Liege.
# - Prof. Edwin van der Weide's group at the University of Twente.
# - Lab. of New Concepts in Aeronautics at Tech. Institute of Aeronautics.
#
# Copyright 2012-2019, Francisco D. Palacios, Thomas D. Economon,
# Tim Albring, and the SU2 contributors.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# make print(*args) function available in PY2.6+, doesn't work on PY < 2.6
from __future__ import print_function

# Run the script from the base directory (ie $SU2HOME). Grep will search directories recursively for matches in version number
import os,sys

# The exact strings being swapped across the whole source tree.
oldvers = '2012-2018'
newvers = '2012-2019'
#oldvers = '6.1.0 "Falcon"'
#newvers = '6.2.0 "Falcon"'

if sys.version_info[0] > 2:
    # In PY3, raw_input is replaced with input.
    # For original input behaviour, just write eval(input())
    raw_input = input

# Start from a clean scratch file; grep output is collected into version.txt.
if os.path.exists('version.txt'):
    os.remove('version.txt')

# Grep flag cheatsheet:
# -I : Ignore binary files
# -F : Match exact pattern (instead of regular expressions)
# -w : Match whole word
# -r : search directory recursively
# -v : Omit search string (.svn omitted, line containing ISC is CGNS related)
#TODO: replace with portable instructions. This works only on unix systems
os.system("grep -IFwr '%s' *|grep -vF '.svn' |grep -v ISC > version.txt"%oldvers)

# Create a list of files to adjust: one entry per unique file in the grep
# output, skipping this script itself.
filelist = []
f = open('version.txt','r')
for line in f.readlines():
    candidate = line.split(':')[0]
    if not candidate in filelist and candidate.find(sys.argv[0])<0:
        filelist.append(candidate)
f.close()
print(filelist)

# Prompt user before continuing
yorn = ''
while(not yorn.lower()=='y'):
    yorn = raw_input('Replace %s with %s in the listed files? [Y/N]: '%(oldvers,newvers))
    if yorn.lower()=='n':
        print('The file version.txt contains matches of oldvers')
        sys.exit()

# Loop through and correct all files
for fname in filelist:
    s = open(fname,'r').read()
    s_new = s.replace(oldvers,newvers)
    f = open(fname,'w')
    f.write(s_new)
    f.close()

os.system('rm -rf version.txt')
|
belokop/indico_bare | migrations/versions/201511251405_33a1d6f25951_add_timetable_related_tables.py | Python | gpl-3.0 | 4,295 | 0.004889 | """Add timetable related tables
Revision ID: 33a1d6f25951
Revises: 225d0750c216
Create Date: 2015-11-25 14:05:51.856236
"""
import sqlalchemy as sa
from alembic import op
from indico.core.db.sqlalchemy import PyIntEnum, UTCDateTime
from indico.modules.events.timetable.models.entries import TimetableEntryType
# revision identifiers, used by Alembic.
revision = '33a1d6f25951'
down_revision = '225d0750c216'
def upgrade():
    """Create the `events.breaks` and `events.timetable_entries` tables.

    NOTE(review): two ` | ` extraction artifacts corrupting a CHECK
    constraint string and the `ForeignKeyConstraint` token were repaired.
    """
    # Break
    op.create_table(
        'breaks',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(), nullable=False),
        sa.Column('description', sa.Text(), nullable=False),
        sa.Column('duration', sa.Interval(), nullable=False),
        sa.Column('text_color', sa.String(), nullable=False),
        sa.Column('background_color', sa.String(), nullable=False),
        sa.Column('room_name', sa.String(), nullable=False),
        sa.Column('inherit_location', sa.Boolean(), nullable=False),
        sa.Column('address', sa.Text(), nullable=False),
        sa.Column('venue_id', sa.Integer(), nullable=True, index=True),
        sa.Column('venue_name', sa.String(), nullable=False),
        sa.Column('room_id', sa.Integer(), nullable=True, index=True),
        # A custom venue/room name may only be set when no managed room is linked.
        sa.CheckConstraint("(room_id IS NULL) OR (venue_name = '' AND room_name = '')",
                           name='no_custom_location_if_room'),
        sa.CheckConstraint("(venue_id IS NULL) OR (venue_name = '')", name='no_venue_name_if_venue_id'),
        sa.CheckConstraint("(room_id IS NULL) OR (venue_id IS NOT NULL)", name='venue_id_if_room_id'),
        sa.CheckConstraint("NOT inherit_location OR (venue_id IS NULL AND room_id IS NULL AND venue_name = '' AND "
                           "room_name = '' AND address = '')", name='inherited_location'),
        sa.CheckConstraint("(text_color = '') = (background_color = '')", name='both_or_no_colors'),
        sa.CheckConstraint("text_color != '' AND background_color != ''", name='colors_not_empty'),
        sa.ForeignKeyConstraint(['room_id'], ['roombooking.rooms.id']),
        sa.ForeignKeyConstraint(['venue_id'], ['roombooking.locations.id']),
        sa.ForeignKeyConstraint(['venue_id', 'room_id'], ['roombooking.rooms.location_id', 'roombooking.rooms.id']),
        sa.PrimaryKeyConstraint('id'),
        schema='events'
    )
    # TimetableEntry: exactly one of session_block/contribution/break is set,
    # enforced per entry `type` by the CHECK constraints below.
    op.create_table(
        'timetable_entries',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('event_id', sa.Integer(), nullable=False, index=True),
        sa.Column('parent_id', sa.Integer(), nullable=True, index=True),
        sa.Column('session_block_id', sa.Integer(), nullable=True, index=True, unique=True),
        sa.Column('contribution_id', sa.Integer(), nullable=True, index=True, unique=True),
        sa.Column('break_id', sa.Integer(), nullable=True, index=True, unique=True),
        sa.Column('type', PyIntEnum(TimetableEntryType), nullable=False),
        sa.Column('start_dt', UTCDateTime, nullable=False),
        sa.Index('ix_timetable_entries_start_dt_desc', sa.text('start_dt DESC')),
        sa.CheckConstraint('type != 1 OR parent_id IS NULL', name='valid_parent'),
        sa.CheckConstraint('type != 1 OR (contribution_id IS NULL AND break_id IS NULL AND '
                           'session_block_id IS NOT NULL)', name='valid_session_block'),
        sa.CheckConstraint('type != 2 OR (session_block_id IS NULL AND break_id IS NULL AND '
                           'contribution_id IS NOT NULL)', name='valid_contribution'),
        sa.CheckConstraint('type != 3 OR (contribution_id IS NULL AND session_block_id IS NULL AND '
                           'break_id IS NOT NULL)', name='valid_break'),
        sa.ForeignKeyConstraint(['break_id'], ['events.breaks.id']),
        sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
        sa.ForeignKeyConstraint(['event_id'], ['events.events.id']),
        sa.ForeignKeyConstraint(['parent_id'], ['events.timetable_entries.id']),
        sa.ForeignKeyConstraint(['session_block_id'], ['events.session_blocks.id']),
        sa.PrimaryKeyConstraint('id'),
        schema='events'
    )
def downgrade():
    """Drop the timetable tables; entries first since they reference breaks."""
    for table_name in ('timetable_entries', 'breaks'):
        op.drop_table(table_name, schema='events')
|
hongaar/meterkast | components/timedReader.py | Python | mit | 1,120 | 0 | from helpers.timer import TaskThread
class TimedReader(TaskThread):
    """Periodic P1 meter reader.

    Polls the given P1 port on a timer, switching between a normal and a
    slower 'hibernation' interval.  Two ` | ` extraction artifacts that
    corrupted `__init__` were repaired.
    """

    DEFAULT_INTERVAL = 1 * 60        # seconds between probes when awake
    HIBERNATION_INTERVAL = 5 * 60    # seconds between probes when hibernating
    DEBUG_INTERVAL = 5               # fixed short interval used in debug mode

    # Hooks installed via event_*_setup(); called around every probe.
    probing_callback = None
    probed_callback = None

    def __init__(self, p1, debug=False):
        TaskThread.__init__(self)
        self.debug = debug
        self.p1 = p1
        self.set_interval(self.DEFAULT_INTERVAL)

    def set_interval(self, interval):
        # In debug mode the requested interval is ignored in favour of a
        # short fixed one, so behaviour can be observed quickly.
        if self.debug:
            TaskThread.set_interval(self, self.DEBUG_INTERVAL)
        else:
            TaskThread.set_interval(self, interval)

    def wakeup(self):
        """Return to the normal polling interval."""
        self.set_interval(self.DEFAULT_INTERVAL)

    def hibernate(self):
        """Slow polling down to the hibernation interval."""
        self.set_interval(self.HIBERNATION_INTERVAL)

    def event_probing_setup(self, callback):
        self.probing_callback = callback

    def event_probed_setup(self, callback):
        self.probed_callback = callback

    def event_start(self):
        self.start()

    def event_stop(self):
        self.shutdown()

    def task(self):
        # Invoked by TaskThread on every tick: notify, probe, publish result.
        self.probing_callback()
        data = self.p1.probe()
        self.probed_callback(data)
|
sinanm89/barista | build/barista-site/barista/settings.py | Python | gpl-2.0 | 6,858 | 0.002333 | # Django settings for barista project.
import os

DEBUG = True
TEMPLATE_DEBUG = DEBUG

# ('Full Name', 'email') pairs that receive error notifications.
ADMINS = (
    ('Sinan Midillili', 'sinan@rahatol.com'),
)

# BUG FIX: the original line ended with a stray comma, turning
# DEFAULT_FROM_EMAIL into a 1-tuple; Django expects a plain string here.
DEFAULT_FROM_EMAIL = 'sinan@rahatol.com'
SERVER_EMAIL = 'sinan@rahatol.com'
MANAGERS = ADMINS

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Istanbul'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media/files/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = 'media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
# STATIC_ROOT = os.path.join(os.path.realpath(os.path.dirname( __file__ )), 'media/' )
# STATIC_ROOT = os.path.join( os.path.dirname(__file__), 'media/')
# print STATIC_ROOT
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/media/'
# Additional locations of static files
# STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# ("suit/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/suit/')),
# ("static/css/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/css/')),
# ("static/images/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/images/')),
# ("static/js/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/js/')),
# ("static/markitup/", os.path.join(os.path.realpath(os.path.dirname(__file__)), 'media/markitup/')),
# )
# List of finder classes that know how to find static files in
# various locations.
# STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
#
# )
# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded SECRET_KEY in version control is a security
# risk; consider loading it from the environment or settings_local.
SECRET_KEY = '94*hza*y@ba!rcq#kalendermesrepcg8%)2%uye9x$1(%1w^x*e93'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

# Request/response middleware, applied in order.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
#
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.request',
)

ROOT_URLCONF = 'barista.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'barista.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(os.path.realpath(os.path.dirname(__file__)), 'templates/'),
    # 'django.template.loaders.filesystem.Loader',
    # 'django.template.loaders.app_directories.Loader',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.gis',
    'gunicorn',
    'suit',
    'barista.restaurants',
    'django_extensions',
    'django_kibrit',
    'django.contrib.admin',
    'django.contrib.admindocs',
)

# Configuration for the django-suit admin skin.
SUIT_CONFIG = {
    'ADMIN_NAME': 'Barista',
    'SHOW_REQUIRED_ASTERISK': True
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# Local memcached instance used as the default cache backend.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    },
}

# Outgoing mail via Gmail SMTP.
# NOTE(review): the password below is a placeholder; real credentials must
# never be committed — load them from the environment or settings_local.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'sinanm89@gmail.com'
EMAIL_HOST_PASSWORD = 'THERE IS A PASSWORD HERE'
EMAIL_USE_TLS = True

# django-kibrit revision info (exposes the git revision to templates).
KIBRIT_PATH = "/home/snn/Projects/barista/src/barista"
TEMPLATE_CONTEXT_PROCESSORS += ('django_kibrit.context_processors.revision',)

POSTGIS_VERSION = (1, 5, 3)

# Optional machine-specific overrides; silently skipped when absent.
try:
    from settings_local import *
except ImportError:
    pass
Lightshadow244/OwnMusicWeb | ownmusicweb/manage.py | Python | apache-2.0 | 809 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point (stray ` | ` extraction
    # artifacts in this block were repaired).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ownmusicweb.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
ianmiell/shutit-test | test/docker_tests/16/test16.py | Python | mit | 395 | 0.043038 | from shu | tit_module import ShutItModule
class test16(ShutItModule):
def build(self, shutit):
shutit.login()
shutit.login(command='bash')
shutit.send('ls',note='We are l | isting files')
shutit.logout()
shutit.logout()
return True
def module():
    # Factory used by the ShutIt framework; the float is the module's unique
    # run-order id and the first argument its globally unique name.
    return test16(
        'shutit.test16.test16.test16', 210790650.002938762,
        description='',
        maintainer='',
        depends=['shutit.tk.setup']
    )
|
capitalone/cloud-custodian | tools/c7n_gcp/c7n_gcp/resources/pubsub.py | Python | apache-2.0 | 3,103 | 0.000967 | # Copyright 2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n.utils import type_schema
from c7n_gcp.actions import MethodAction
from c7n_gcp.provider import resources
from c7n_gcp.query import QueryResourceManager, TypeInfo
"""
todo, needs detail_spec
"""
@resources.register('pubsub-topic')
class PubSubTopic(QueryResourceManager):
    """GCP resource: https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics
    """

    class resource_type(TypeInfo):
        # API wiring for the Cloud Pub/Sub v1 `projects.topics` collection.
        service = 'pubsub'
        version = 'v1'
        component = 'projects.topics'
        enum_spec = ('list', 'topics[]', None)
        scope_template = "projects/{}"
        name = id = "name"
        default_report_fields = ["name", "kmsKeyName"]
        asset_type = "pubsub.googleapis.com/Topic"

    @staticmethod
    def get(client, resource_info):
        # Fetch a single topic; `topic_id` is the fully qualified topic name.
        return client.execute_command(
            'get', {'topic': resource_info['topic_id']})
@PubSubTopic.action_registry.register('delete')
class DeletePubSubTopic(MethodAction):
    """Policy action: delete a Pub/Sub topic via `projects.topics.delete`."""

    schema = type_schema('delete')
    method_spec = {'op': 'delete'}

    def get_resource_params(self, m, r):
        # The delete call only needs the fully qualified topic name.
        return {'topic': r['name']}
@resources.register('pubsub-subscription')
class PubSubSubscription(QueryResourceManager):
    """GCP resource: https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions

    Repairs two ` | ` extraction artifacts that corrupted the `version`
    attribute and the `resource_info` parameter name.
    """

    class resource_type(TypeInfo):
        service = 'pubsub'
        version = 'v1'
        component = 'projects.subscriptions'
        enum_spec = ('list', 'subscriptions[]', None)
        scope_template = 'projects/{}'
        name = id = 'name'
        default_report_fields = [
            "name", "topic", "ackDeadlineSeconds",
            "retainAckedMessages", "messageRetentionDuration"]
        asset_type = "pubsub.googleapis.com/Subscription"

    @staticmethod
    def get(client, resource_info):
        # Fetch one subscription; `subscription_id` is its full name.
        return client.execute_command(
            'get', {'subscription': resource_info['subscription_id']})
@PubSubSubscription.action_registry.register('delete')
class DeletePubSubSubscription(MethodAction):
    """Policy action: delete a Pub/Sub subscription."""

    schema = type_schema('delete')
    method_spec = {'op': 'delete'}

    def get_resource_params(self, m, r):
        # The delete call only needs the fully qualified subscription name.
        return {'subscription': r['name']}
@resources.register('pubsub-snapshot')
class PubSubSnapshot(QueryResourceManager):
    """GCP resource: https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.snapshots
    """

    class resource_type(TypeInfo):
        # API wiring for the Cloud Pub/Sub v1 `projects.snapshots` collection.
        service = 'pubsub'
        version = 'v1'
        component = 'projects.snapshots'
        enum_spec = ('list', 'snapshots[]', None)
        scope_template = 'projects/{}'
        name = id = 'name'
        default_report_fields = [
            "name", "topic", "expireTime"]
@PubSubSnapshot.action_registry.register('delete')
class DeletePubSubSnapshot(MethodAction):
    """Policy action: delete a Pub/Sub snapshot."""

    schema = type_schema('delete')
    method_spec = {'op': 'delete'}

    def get_resource_params(self, m, r):
        # The delete call only needs the fully qualified snapshot name.
        return {'snapshot': r['name']}
|
jonzobrist/Percona-Server-5.1 | kewpie/lib/util/mysqlBaseTestCase.py | Python | bsd-3-clause | 12,988 | 0.018017 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import os
import time
import difflib
import subprocess
import MySQLdb
servers = None
class mysqlBaseTestCase(unittest.TestCase):
    def setUp(self):
        """ If we need to do anything pre-test, we do it here.
            Any code here is executed before any test method we
            may execute
        """
        # `servers` is a module-level global populated by the test harness
        # before the test cases run.
        self.servers = servers
        return
    def tearDown(self):
        """Reset state after each test: drop and recreate the `test` schema
        on every server (queries are run against the `mysql` schema)."""
        #server_manager.reset_servers(test_executor.name)
        queries = ["DROP SCHEMA IF EXISTS test"
                  ,"CREATE SCHEMA IF NOT EXISTS test"
                  ]
        for server in self.servers:
            retcode, result = self.execute_queries(queries, server, schema='mysql')
            self.assertEqual(retcode,0,result)
# Begin our utility code here
# This is where we add methods that enable a test to do magic : )
def execute_cmd(self, cmd, stdout_path, exec_path=None, get_output=False):
stdout_file = open(stdout_path,'w')
cmd_subproc = subprocess.Popen( cmd
, shell=True
, cwd=exec_path
, stdout = stdout_file
, stderr = subprocess.STDOUT
)
cmd_subproc.wait()
retcode = cmd_subproc.returncode
stdout_file.close()
if get_output:
data_file = open(stdout_path,'r')
output = ''.join(data_file.readlines())
else:
output = None
return retcode, output
def get_tables(self, server, schema):
""" Return a list of the tables in the
schema on the server
"""
results = []
query = "SHOW TABLES IN %s" %(schema)
retcode, table_set = self.execute_query(query, server)
for table_data in table_set:
table_name = table_data[0]
results.append(table_name)
return results
def check_slaves_by_query( self
, master_server
, other_servers
, query
, expected_result = None
):
""" We execute the query across all servers
and return a dict listing any diffs found,
None if all is good.
If a user provides an expected_result, we
will skip executing against the master
This is done as it is assumed the expected
result has been generated / tested against
the master
"""
comp_results = {}
if expected_result:
pass # don't bother getting it
else:
# run against master for 'good' value
retcode, expected_result = self.execute_query(query, master_server)
for server in other_servers:
retcode, slave_result = self.execute_query(query, server)
#print "%s: expected_result= %s | slave_result= %s" % ( server.name
# , expected_result
# , slave_result_
# )
if not expected_result == slave_result:
comp_data = "%s: expected_result= %s | slave_result= %s" % ( server.name
, expected_result
, slave_result
)
if comp_results.has_key(server.name):
comp_results[server.name].append(comp_data)
else:
comp_results[server.name]=[comp_data]
if comp_results:
return comp_results
return None
def check_slaves_by_checksum( self
, master_server
, other_servers
, schemas=['test']
, tables=[]
):
""" We compare the specified tables (default = all)
from the specified schemas between the 'master'
and the other servers provided (via list)
via CHECKSUM
We return a dictionary listing the server
and any tables that differed
"""
comp_results = {}
for server in other_servers:
for schema in schemas:
for table in self.get_tables(master_server, schema):
query = "CHECKSUM TABLE %s.%s" %(schema, table)
retcode, master_checksum = self.execute_query(query, master_server)
retcode, slave_checksum = self.execute_query(query, server)
#print "%s: master_checksum= %s | slave_checksum= %s" % ( table
# , master_checksum
# , slave_checksum
# )
if not master_checksum == slave_checksum:
comp_data = "%s: master_checksum= %s | slave_checksum= %s" % ( table
, master_checksum
, slave_checksum
)
if comp_results.has_key(server.name):
comp_results[server.name].append(comp_data)
else:
comp_results[server.name]=[comp_data]
if comp_results:
return comp_results
return None
def take_mysqldump( self
, server
, databases=[]
, tables=[]
, dump_path = None
, cmd_root = None):
""" Take a mysqldump snapshot of the | given
server, storing the output to dump_path
"""
if not dump_path:
dump_path = os.path.join(server.vardir, 'dumpfile.dat')
if cmd_root:
dum | p_cmd = cmd_root
else:
dump_cmd = "%s --no-defaults --user=root --port=%d --host=127.0.0.1 --protocol=tcp --result-file=%s" % ( server.mysqldump
, server.master_port
, dump_path
)
if databases:
if len(databases) > 1:
# We have a list of db's that are to be dumped so we handle things
dump_cmd = ' '.join([dump_ |
t-mertz/slurmCompanion | django-web/webcmd/cmdtext.py | Python | mit | 4,609 | 0.006943 | import sys
MAX_NUM_STORED_LINES = 200   # hard cap on the scroll-back buffer
MAX_NUM_LINES = 10           # number of lines visible on screen
LINEWIDTH = 80               # device line width in characters


class CmdText(object):
    """
    Represents a command line text device. Text is split into lines
    corresponding to the linewidth of the device.
    """

    def __init__(self):
        """
        Construct empty object.
        """
        self.num_lines = 0
        self.remaining_lines = MAX_NUM_LINES
        self.lines = []

    def insert(self, string):
        """
        Insert string at the end. This always begins a new line.
        """
        # num_lines(string) == len(get_lines(string)) by construction, so
        # the whole wrapped rendering of `string` is appended (the original
        # slicing and dead capacity check added nothing).
        self.lines += get_lines(string)
        self.update_num_lines()

    def merge_after(self, obj):
        """
        Merge with another CmdText object by appending the input object's
        content.  (FIX: the original body was an unfinished no-op.)
        """
        self.lines += obj.lines
        self.update_num_lines()

    def strip_lines(self):
        """
        Remove excessive number of lines. This deletes the oldest half.
        """
        if self.num_lines > MAX_NUM_STORED_LINES:
            # FIX: the original popped at *increasing* indices, which removes
            # every other line instead of the oldest block, and it never
            # refreshed num_lines afterwards.
            del self.lines[:MAX_NUM_STORED_LINES // 2]
            self.update_num_lines()

    def update_num_lines(self):
        """
        Update the number of lines member.
        """
        self.num_lines = len(self.lines)

    def get_line(self, n):
        """
        Return the line with index n.
        """
        if n < self.num_lines:
            return self.lines[n]
        else:
            raise IndexError("Line index out of range.")

    def print_screen(self):
        """
        Return MAX_NUM_LINES lines.
        """
        return self.lines[-MAX_NUM_LINES:]

    def __iter__(self):
        """
        Iterator for CmdText object.
        """
        for l in self.lines:
            yield l

    def __getitem__(self, ind):
        return self.lines[ind]


def num_lines(string):
    """
    Return the number of device lines `string` occupies once wrapped at
    LINEWIDTH; always equals len(get_lines(string)).
    """
    # FIX: the original added len(string)-based counts on top of the raw
    # line count, grossly overestimating the result.
    return sum(len(line) // LINEWIDTH + 1 for line in string.split("\n"))


def get_lines(string):
    """
    Return list of lines extracted from string.
    """
    new_list = []
    for l in string.split('\n'):
        # Each logical line yields len//LINEWIDTH + 1 chunks; a line of
        # exactly LINEWIDTH chars therefore produces a trailing empty chunk.
        new_list += [l[i * LINEWIDTH:(i + 1) * LINEWIDTH]
                     for i in range(len(l) // LINEWIDTH + 1)]
    return new_list
class Command(CmdText):
    """A command entered on the device, optionally linked to its response.

    (FIX: a ` | ` extraction artifact corrupting `self.insert` was repaired.)
    """

    def __init__(self, string, rind=None):
        CmdText.__init__(self)
        self.insert(string)
        if rind is not None:
            # Index of the Response object this command produced.
            self.response = rind
class Response(CmdText):
    """Output produced by a command, optionally linked back to it."""

    def __init__(self, string, cind=None):
        super(Response, self).__init__()
        self.insert(string)
        # Only record the back-reference when a command index was supplied.
        if cind is not None:
            self.command = cind
class TestCase(object):
    """
    Base class for tests.
    """
    @classmethod
    def run(cls):
        """
        Runs all tests (methods which begin with 'test').
        """
        #print(cls)
        # Width of the longest attribute name, used to align the dots.
        max_len = max([len(a) for a in cls.__dict__])
        for key in cls.__dict__:
            if key.startswith("test"):
                fill = max_len - len(key)
                sys.stdout.write("Testing {} ...{} ".format(key, '.'*fill))
                try:
                    # Looked up via __dict__, so this calls the plain
                    # function without binding (no self/cls argument).
                    cls.__dict__[key]()
                except:
                    raise
                else:
                    print("Test passed!")
        print("All tests passed!")
class StaticTest(TestCase):
    """
    Tests for static methods.
    """
    # NOTE: these take no `self` on purpose — TestCase.run invokes them as
    # plain functions via cls.__dict__.
    def test_get_lines_with_empty_string():
        assert get_lines("") == [""]
    def test_get_lines_with_short_string():
        assert len(get_lines("a"*(LINEWIDTH-1))) == 1
    def test_get_lines_with_long_string():
        assert len(get_lines("a"*(2*LINEWIDTH-1))) == 2
    def test_get_lines_with_very_long_string():
        assert len(get_lines("a"*(4*LINEWIDTH-1))) == 4
    def test_get_lines_with_long_text_string():
        text = "This is a test string, which should simulate real text. The command should" \
            + " correctly split this text into two lines."
        LINEWIDTH = 80
        correct_lines = [text[:LINEWIDTH], text[LINEWIDTH:]]
        assert len(get_lines(text)) == len(text) // LINEWIDTH + 1
        assert get_lines(text) == correct_lines
class CmdTextTest(object):
    """
    Tests for CmdText class methods.
    """
    # Placeholder: no CmdText tests implemented yet.
    pass
pydoit/doit-cmd | test_doitcmd.py | Python | mit | 2,946 | 0.004752 |
from doit.tools import Interactive
import doitcmd
from doitcmd import BaseCommand
class TestBaseCommand_OptStr(object):
    """Tests for BaseCommand.opt_str (rendering of command-line options)."""
    def test_no_value(self):
        assert '' == BaseCommand.opt_str()
    def test_single_value(self):
        # Long names render with '--', single letters with '-'; truthy
        # booleans become bare flags, falsy values are dropped entirely.
        assert '--name test' == BaseCommand.opt_str({'name':'test'})
        assert '-n test' == BaseCommand.opt_str({'n':'test'})
        assert '--flag' == BaseCommand.opt_str({'flag': True})
        assert '' == BaseCommand.opt_str({'flag': False})
        assert '' == BaseCommand.opt_str({'flag': None})
    def test_many_values(self):
        opt1 = {'n':'test'}
        opt2 = {'val2':'lala', 'o':True}
        got = BaseCommand.opt_str(opt1, opt2)
        # Option ordering is dict-dependent, so only membership is checked.
        assert '-n test' in got
        assert '--val2 lala' in got
        assert '-o' in got
class TestBaseCommand_Init(object):
    """Tests for BaseCommand construction defaults and overrides.

    (FIX: a ` | ` extraction artifact in the final assertion's dict literal
    was repaired.)
    """

    class MyCmd(BaseCommand):
        # Class-level defaults that constructor arguments may override.
        sudo = True
        interactive = True
        base_options = {'p1': 'ha'}

    def test_defaults(self):
        cmd = BaseCommand()
        assert False == cmd.sudo
        assert False == cmd.interactive
        assert {} == cmd.options

    def test_cvar_defaults(self):
        cmd = self.MyCmd()
        assert True == cmd.sudo
        assert True == cmd.interactive
        assert {'p1': 'ha'} == cmd.options

    def test_set_init(self):
        cmd = self.MyCmd(sudo=False, interactive=True, options={'p2': 'bar'})
        assert False == cmd.sudo
        assert True == cmd.interactive
        # base_options and the explicit options are merged.
        assert {'p1': 'ha', 'p2': 'bar'} == cmd.options
class TestBaseCommand_Action(object):
    """Tests for how BaseCommand renders its final task action.

    (FIX: a ` | ` extraction artifact inside the `format(` call was repaired.)
    """

    class MyCmd(BaseCommand):
        cmd_template = 'foo {options} {target}'

        def __call__(self, target):
            action = self.action(self.cmd_template.format(
                target=target,
                options=self.opt_str(self.options),
            ))
            return {
                'name': target,
                'actions': [action],
            }

    def test_normal(self):
        cmd = self.MyCmd(options={'bar': 'baz'})
        assert 'foo --bar baz tx' == cmd('tx')['actions'][0]

    def test_sudo(self):
        # sudo=True prefixes the rendered command line.
        cmd = self.MyCmd(sudo=True, options={'bar': 'baz'})
        assert 'sudo foo --bar baz tx' == cmd('tx')['actions'][0]

    def test_interactive(self):
        # interactive=True wraps the command in a doit Interactive action.
        cmd = self.MyCmd(interactive=True, options={'bar': 'baz'})
        action = cmd('tx')['actions'][0]
        assert isinstance(action, Interactive)
        assert 'foo --bar baz tx' == action.action
class TestCmd(object):
    """Tests for the module-level cmd()/interactive() task helpers."""
    def test_cmd(self):
        # Extra keyword arguments become task metadata (here: file_dep).
        task = doitcmd.cmd('foo -p bar', file_dep=['xxx'])
        assert ['xxx'] == task['file_dep']
        assert ['foo -p bar'] == task['actions']
    def test_interactive(self):
        task = doitcmd.interactive('foo -p bar', file_dep=['xxx'])
        assert ['xxx'] == task['file_dep']
        action = task['actions'][0]
        assert 'foo -p bar' == action.action
        assert isinstance(action, Interactive)
|
jedie/djangocms-find_and_replace | find_and_replace/tests/test_doctest.py | Python | gpl-3.0 | 2,605 | 0.001919 | # coding: utf-8
"""
django find&replace
~~~~~~~~~~~~~~~
:copyleft: 2015 by find&replace team, see AUTHORS for more details.
:created: 2015 by JensDiemer.de
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function

import os
import doctest  # FIX: import line was corrupted by a ' | ' extraction artifact
import sys

import find_and_replace

# Root file of the package whose modules will be scanned for doctests.
BASE_DIR = find_and_replace.__file__

# Directory and file names that must never be scanned.
SKIP_DIRS = (".settings", ".git", "dist", ".egg-info")
SKIP_FILES = ("setup.py", "test.py")
def get_all_doctests(base_path, verbose=False):
    """Import every .py module under `base_path` and collect those that
    contain at least one doctest.

    (FIX: a ` | ` extraction artifact corrupting the SKIP_DIRS loop line
    was repaired.)

    :param base_path: directory tree to scan (SKIP_DIRS/SKIP_FILES pruned)
    :param verbose: when True, report import failures and modules without
        doctests on stderr
    :return: list of imported module objects containing doctests
    """
    modules = []
    for root, dirs, filelist in os.walk(base_path, followlinks=True):
        for skip_dir in SKIP_DIRS:
            if skip_dir in dirs:
                dirs.remove(skip_dir) # don't visit this directories
        for filename in filelist:
            if not filename.endswith(".py"):
                continue
            if filename in SKIP_FILES:
                continue
            # Temporarily put the module's directory first on sys.path so a
            # bare __import__ can resolve it; removed again in `finally`.
            sys.path.insert(0, root)
            try:
                module = __import__(filename[:-3])
            except ImportError as err:
                if verbose:
                    sys.stderr.write(
                        "\tDocTest import %s error %s\n" % (filename, err)
                    )
            except Exception as err:
                if verbose:
                    sys.stderr.write(
                        "\tDocTest %s error %s\n" % (filename, err)
                    )
            else:
                try:
                    suite = doctest.DocTestSuite(module)
                except ValueError: # has no docstrings
                    continue
                test_count = suite.countTestCases()
                if test_count<1:
                    if verbose:
                        sys.stderr.write(
                            "\tNo DocTests in %r\n" % module.__name__
                        )
                    continue
                if verbose:
                    file_info = module.__file__
                else:
                    file_info = module.__name__
                sys.stderr.write(
                    "\t%i DocTests in %r\n" % (test_count,file_info)
                )
                modules.append(module)
            finally:
                del sys.path[0]
    return modules
def load_tests(loader, tests, ignore):
sys.stderr.write("\ncollect DocTests:\n")
path = os.path.abspath(os.path.dirname(BASE_DIR))
modules = get_all_doctests(
base_path=path,
# verbose=True
)
for module in modules:
tests.addTests(doctest.DocTestSuite(module))
return tests
|
gigglearrows/anniesbot | pajbot/models/emotecomboparser.py | Python | mit | 1,638 | 0.001221 | import re
import logging
log = logging.getLogger('pajbot')
class EmoteComboParser:
def __init__(self, bot):
self.bot = bot
self.emote_count = 0
self.current_emote = None
def inc_emote_count(self):
self.emote_count += 1
if self.emote_count >= 5:
self.bot.websocket_manager.emit('emote_combo', {'emote': self.current_emote, 'count': self.emote_count})
def reset | (self):
self.emote_count = 0
self.current_emote = None
def parse_line(self, msg, source, emotes):
if len(emotes) == 0:
# Ignore messages without any emotes
return False
prev_code = None
for emote in emotes:
if prev_code is | not None:
if prev_code != emote['code']:
# The message contained more than 1 unique emote, reset.
self.reset()
return False
else:
prev_code = emote['code']
emote = emotes[0]
if self.current_emote is not None:
if not self.current_emote['code'] == emote['code']:
# The emote of this message is not the one we were previously counting, reset.
# We do not stop.
# We start counting this emote instead.
self.reset()
if self.current_emote is None:
self.current_emote = {
'code': emote['code'],
'twitch_id': emote.get('twitch_id', None),
'bttv_hash': emote.get('bttv_hash', None)
}
self.inc_emote_count()
|
jumpstarter-io/keystone | keystone/common/ldap/core.py | Python | apache-2.0 | 76,368 | 0.000013 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap.filter
import ldappool
from oslo_log import log
import six
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
LOG = log.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
'sub': ldap.SCOPE_SUBTREE}
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
'default': None,
'finding': ldap.DEREF_FINDING,
'never': ldap.DEREF_NEVER,
'searching': ldap.DEREF_SEARCHING}
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
'demand': ldap.OPT_X_TLS_DEMAND,
'allow': ldap.OPT_X_TLS_ALLOW}
# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
# indicate that no attributes should be returned besides the DN.
DN_ONLY = ['1.1']
_utf8_encoder = codecs.getencoder('utf-8')
def utf8_encode(value):
"""Encode a basestring to UTF-8.
If the string is unicode encode it to UTF-8, if the string is
str then assume it's already encoded. Otherwise raise a TypeError.
:param value: A basestring
:returns: UTF-8 encoded version of value
:raises: TypeError if value is not basestring
"""
if isinstance(value, six.text_type):
return _utf8_encoder(value)[0]
elif isinstance(value, six.binary_type):
return value
else:
raise TypeError("value must be basestring, "
"not %s" % value.__class__.__name__)
_utf8_decoder = codecs.getdecoder('utf-8')
def utf8_decode(value):
"""Decode a from UTF-8 into unicode.
If the value is a binary string assume it's UTF-8 encoded and decode
it into a unicode string. Otherwise convert the value from its
type into a unicode string.
:param value: value to be returned as unicode
:returns: value as unicode
:raises: UnicodeDecodeError for invalid UTF-8 encoding
"""
if isinstance(value, six.binary_type):
return _utf8_decoder(value)[0]
return six.text_type(value)
def py2ldap(val):
"""Type convert a Python value to a type accepted by LDAP (unicode).
The LDAP API only accepts strings for values therefore convert
the value's type to a unicode string. A subsequent type conversion
will encode the unicode as UTF-8 as required by the python-ldap API,
but for now we just want a string representation of the value.
:param val: The value to convert to a LDAP string representation
:returns: unicode string representation of value.
"""
if isinstance(val, bool):
return u'TRUE' if val else u'FALSE'
else:
return six.text_type(val)
def enabled2py(val):
"""Similar to ldap2py, only useful for the enabled attribute."""
try:
return LDAP_VALUES[val]
except KeyError:
pass
try:
return int(val)
except ValueError:
pass
return utf8_decode(val)
def ldap2py(val):
"""Convert an LDAP formatted value to Python type used by OpenStack.
Virtually all LDAP values are stored as UTF-8 encoded strings.
OpenStack prefers values which are unicode friendly.
:param val: LDAP formatted value
:returns: val converted to preferred Python type
"""
return utf8_decode(val)
def convert_ldap_result(ldap_result):
"""Convert LDAP search result to Python types used by OpenStack.
Each result tuple is of the form (dn, attrs), where dn is a string
containing the DN (distinguished name) of the entry, and attrs is
a dictionary containing the attributes associated with the
entry. The keys of attrs are strings, and the associated values
are lists of strings.
OpenStack wants to use Python types of its choosing. Strings will
be unicode, truth values boolean, whole numbers int's, etc. DN's will
also be decoded from UTF-8 to unicode.
:param ldap_result: LDAP search result
:returns: list of 2-tuples containing (dn, attrs) where dn is unicode
and attrs is a dict whose values are type converted to
OpenStack preferred types.
"""
py_result = []
at_least_one_referral = False
for dn, attrs in ldap_result:
ldap_attrs = {}
if dn is None:
# this is a Referral object, rather than an Entry object
at_least_one_referral = True
continue
for kind, values in six.iteritems(attrs):
try:
val2py = enabled2py if kind == 'enabled' else ldap2py
ldap_attrs[kind] = [val2py(x) for x in values]
except UnicodeDecodeError:
LOG.debug('Unable to decode value for attribute %s', kind)
py_result.append((utf8_decode(dn), ldap_attrs))
if at_least_one_referral:
LOG.debug(('Referrals were returned and ignored. Enable referral '
'chasing in keystone.conf via [ldap] chase_referrals'))
return py_result
def safe_iter(attrs):
if attrs is None:
return
elif isinstance(attrs, list):
for e in attrs:
yield e
else:
yield attrs
def parse_deref(opt):
try:
return LDAP_DEREF[opt]
except KeyError:
raise ValueError(_('Invalid LDAP deref option: %(option)s. '
'Choose one of: %(options)s') %
{'option': opt,
'options': ', '.join(LDAP_DEREF.keys()), })
def parse_tls_cert(opt):
try:
return LDAP_TLS_CERTS[opt]
except KeyError:
raise ValueError(_(
'Invalid LDAP TLS certs option: %(option)s. '
'Choose one of: %(options)s') % {
'option': opt,
'options': ', '.join(LDAP_TLS_CERTS.keys())})
def ldap_scope(scope):
try:
return LDAP_SCOPES[scope]
except KeyError:
raise ValueError(
_('Invalid LDAP scope: %(scope)s. Choose one of | : %(options)s') % {
'scope': scope,
'options': ', '.join(LDAP_SCOPES.keys())})
def prep_case_insensitive(value):
"""Prepare a string for case-insensitive comparison.
Th | is is defined in RFC4518. For simplicity, all this function does is
lowercase all the characters, strip leading and trailing whitespace,
and compress sequences of spaces to a single space.
"""
value = re.sub(r'\s+', ' ', value.strip().lower())
return value
def is_ava_value_equal(attribute_type, val1, val2):
"""Returns True if and only if the AVAs are equal.
When comparing AVAs, the equality matching rule for the attribute type
should be taken into consideration. For simplicity, this implementation
does a case-insensitive comparison.
Note that this function uses prep_case_insenstive so the limitations of
that function apply here.
"""
return prep_case_insensitive(val1) == prep_case_insensitive(val2)
def is_rdn_equal(rdn1, rdn2):
"""Returns True if and only if the RDNs are equal.
* RDNs must have the same number of AVAs.
* Each AVA of the RDNs must be the equal for the same attribute type. The
order isn't significant. Note that an attribute type will only be in one
AVA in an RDN, otherwise the DN wouldn't be valid.
* Attribute types aren't case sensitive. Note that attribute type
comparison is more complicated than implemented. This function only
compares case-insentive. The code should handle mul |
lukovkin/ufcnn-keras | models/UFCNN1_REPO_V16_TESTMODE.py | Python | mit | 51,403 | 0.013054 | from __future__ import absolute_import
from __future__ import print_function
import sys
import glob
import time
import numpy as np
import pandas as pd
import os.path
import time
import datetime
import re
from keras.preprocessing import sequence
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential, Graph, Model
from keras.models import model_from_json
from keras.layers import Input, merge, Flatten, Dense, Activation, Convolution1D, ZeroPadding1D
#from keras.layers.core import Dense, Dropout, Activation, TimeDistributedDense, Flatten, Reshape, Permute, Merge, Lambda
#from keras.layers.convolutional import Convolution1D, MaxPooling1D, Convolution2D, MaxPooling2D, UpSampling1D, UpSampling2D, ZeroPadding1D
from keras.layers.advanced_activations import ParametricSoftplus, SReLU
from keras.callbacks import ModelCheckpoint, Callback
import matplotlib.pyplot as plt
path = "./training_data_large/" # to make sure signal files are written in same directory as data files
def draw_model(model):
from IPython.display import SVG
from keras.utils.visualize_util import model_to_dot
from keras.utils.visualize_util import plot
#graph = to_graph(model, show_shape=True)
#graph.write_png("UFCNN_1.png")
SVG(model_to_dot(model).create(prog='dot', format='svg'))
plot(model, to_file='UFCNN_1.png')
def print_nodes_shapes(model):
for k, v in model.inputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.nodes.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.outputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
def print_layers_shapes(model):
for l in model.layers:
print("{} : {} : {}".format(type(l), l.input_shape, l.output_shape))
def save_neuralnet (model, model_name):
json_string = model.to_json()
open(path + model_name + '_architecture.json', 'w').write(json_string)
model.save_weights(path + model_name + '_weights.h5', overwrite=True)
yaml_string = model.to_yaml()
with open(path + model_name + '_data.yml', 'w') as outfile:
outfile.write( yaml_string)
def load_neuralnet(model_name):
"""
reading the model from disk - including all the trained weights and the complete model design (hyperparams, planes,..)
"""
arch_name = path + model_name + '_architecture.json'
weight_name = path + model_name + '_weights.h5'
if not os.path.isfile(arch_name) or not os.path.isfile(weight_name):
print("model_name given and file %s and/or %s not existing. Aborting." % (arch_name, weight_name))
sys.exit()
print("Loaded model: ",model_name)
model = model_from_json(open(arch_name).read())
model.load_weights(weight_name)
return model
def ufcnn_model_concat(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
activation="softplus",
init="lecun_uniform"):
#model = Graph()
#model.add_input(name='input', input_shape=(None, features))
main_input = Input(name='input', shape=(None, features))
#########################################################
#model.add_node(ZeroPadding1D(2), name='input_padding', input='input') # to avoid lookahead bias
input_padding = (ZeroPadding1D(2))(main_input) # to avoid lookahead bias
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features)), name='conv1', input='input_padding')
#model.add_node(Activation(activation), name='relu1', input='conv1')
conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features))(input_padding)
relu1 = (Activation(activation))(conv1)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv2', input='relu1')
#model.add_node(Activation(activation), name='relu2', input='conv2')
conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu1)
relu2 = (Activation(activation))(conv2)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter | , filter_length=filter_length, border_mode='same', init=init), name='conv3', input='relu2')
#model.add_node(Activation(activation), name='relu3', input='conv3')
conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu2)
relu3 = (Activation(activation))(co | nv3)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv4', input='relu3')
#model.add_node(Activation(activation), name='relu4', input='conv4')
conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu3)
relu4 = (Activation(activation))(conv4)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv5', input='relu4')
#model.add_node(Activation(activation), name='relu5', input='conv5')
conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu4)
relu5 = (Activation(activation))(conv5)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv6',
# inputs=['relu3', 'relu5'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu6', input='conv6')
conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
relu6 = (Activation(activation))(conv6)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv7',
# inputs=['relu2', 'relu6'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu7', input='conv7')
conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
relu7 = (Activation(activation))(conv7)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv8',
# inputs=['relu1', 'relu7'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu8', input='conv8')
conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
relu8 = (Activation(activation))(conv8)
#########################################################
if regression:
#########################################################
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_output(name='output', input='conv9')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
output = conv9
#main_output = conv9.output
e |
qedsoftware/commcare-hq | corehq/apps/toggle_ui/urls.py | Python | bsd-3-clause | 291 | 0.003436 | from django.conf.urls import | url
from corehq.apps.toggle_ui.views import ToggleListView, ToggleEditView
urlpatterns = [
url(r'^$', ToggleListView.as_view(), name=ToggleListView.urlname),
url(r'^edit/(?P<toggle>[\w_-]+)/$', ToggleEditView.as_view(), name=T | oggleEditView.urlname),
]
|
samhoo/askbot-realworld | askbot/management/commands/askbot_add_test_content.py | Python | gpl-3.0 | 10,587 | 0.007368 | from django.core.management.base import NoArgsCommand
from askbot.models import User
from optparse import make_option
from askbot.utils.console import choice_dialog
NUM_USERS = 40
# KEEP NEXT 3 SETTINGS LESS THAN OR EQUAL TO NUM_USERS!
NUM_QUESTIONS = 40
NUM_ANSWERS = 20
NUM_COMMENTS = 20
# To ensure that all the actions can be made, repute each user high positive
# karma. This can be calculated dynamically - max of MIN_REP_TO_... settings
INITIAL_REPUTATION = 500
# Defining template inputs.
USERNAME_TEMPLATE = "test_user_%s"
PASSWORD_TEMPLATE = "test_password_%s"
EMAIL_TEMPLATE = "test_user_%s@askbot.org"
TITLE_TEMPLATE = "Test question title No.%s"
TAGS_TEMPLATE = ["tag-%s-0", "tag-%s-1"] # len(TAGS_TEMPLATE) tags per question
CONTENT_TEMPLATE = """Lorem lean startup ipsum product market fit customer
development acquihire technical cofounder. User engagement
**A/B** testing *shrink* a market venture capital pitch."""
ANSWER_TEMPLATE = """Accelerator photo sharing business school drop out ramen
hustle crush it revenue traction platforms."""
COMMENT_TEMPLATE = """Main differentiators business model micro economics
marketplace equity augmented reality human computer"""
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Do not prompt the user for input of any kind.'),
)
def print_if_verbose(self, text):
"Only print if user chooses verbose output"
if self.verbosity > 0:
print text
def create_users(self):
"Create the users and return an array of created users"
users = []
#add admin with the same password
admin = User.objects.create_user('admin', 'admin@example.com')
admin.set_password('admin')
self.print_if_verbose("Created User 'admin'")
users.append(admin)
# Keeping the created users in array - we will iterate over them
# several times, we don't want querying the model each and every time.
for i in range(NUM_USERS):
s_idx = str(i)
user = User.objects.create_user(USERNAME_TEMPLATE % s_idx,
EMAIL_TEMPLATE % s_idx)
user.set_password(PASSWORD_TEMPLATE % s_idx)
user.reputation = INITIAL_REPUTATION
user.save()
self.print_if_verbose("Created User '%s'" % user.username)
users.append(user)
return users
def create_questions(self, users):
"Create the questions and return the last one as active question"
# Keeping the last active question entry for later use. Questions API
# might change, so we rely solely on User data entry API.
active_question = None
last_vote = False
# Each user posts a question
for user in users[:NUM_QUESTIONS]:
# Downvote/upvote the questions - It's reproducible, yet
# gives good randomized data
if not active_question is None:
if last_vote:
user.downvote(active_question)
self.print_if_verbose("%s downvoted a question"%(
user.username
))
else:
user.upvote(active_question)
self.print_if_verbose("%s upvoted a question"%(
user.username
))
last_vote = ~last_vote
# len(TAGS_TEMPLATE) tags per question - each tag is different
tags = " ".join([t%user.id for t in TAGS_TEMPLATE])
active_question = user.post_question(
title = TITLE_TEMPLATE % user.id,
body_text = CONTENT_TEMPLATE,
tags = tags,
)
self.print_if_verbose("Created Question '%s' with tags: '%s'" % (
active_question.title, tags,)
)
return active_question
def create_answers(self, users, active_question):
"Create the answers for the active question, return the active answer"
active_answer = None
last_vote = False
# Now, fill the last added question with answers
for user in users[:NUM_ANSWERS]:
# We don't need to test for data validation, so ONLY users
# that aren't authors can post answer to the question
if not active_question.author is user:
# Downvote/upvote the answers - It's reproducible, yet
# gives good randomized data
if not active_answer is None:
if last_vote:
user.downvote(active_answer)
self.print_if_verbose("%s downvoted an answer"%(
user.username
))
else:
user.upvote(active_answer)
| self.print_if_verbose("%s upvoted an answer"%(
user.username
))
last_vote = ~last_vote
active_answer = user.post_answer(
ques | tion = active_question,
body_text = ANSWER_TEMPLATE,
follow = True
)
self.print_if_verbose("%s posted an answer to the active question"%(
user.username
))
# Upvote the active question
user.upvote(active_question)
# Follow the active question
user.follow_question(active_question)
self.print_if_verbose("%s followed the active question"%(
user.username)
)
# Subscribe to the active question
user.subscribe_for_followed_question_alerts()
self.print_if_verbose("%s subscribed to followed questions"%(
user.username)
)
return active_answer
def create_comments(self, users, active_question, active_answer):
"""Create the comments for the active question and the active answer,
return 2 active comments - 1 question comment and 1 answer comment"""
active_question_comment = None
active_answer_comment = None
for user in users[:NUM_COMMENTS]:
active_question_comment = user.post_comment(
parent_post = active_question,
body_text = COMMENT_TEMPLATE
)
self.print_if_verbose("%s posted a question comment"%user.username)
active_answer_comment = user.post_comment(
parent_post = active_answer,
body_text = COMMENT_TEMPLATE
)
self.print_if_verbose("%s posted an answer comment"%user.username)
# Upvote the active answer
user.upvote(active_answer)
# Upvote active comments
if active_question_comment and active_answer_comment:
num_upvotees = NUM_COMMENTS - 1
for user in users[:num_upvotees]:
user.upvote(active_question_comment)
user.upvote(active_answer_comment)
return active_question_comment, active_answer_comment
def handle_noargs(self, **options):
self.verbosity = int(options.get("verbosity", 1))
self.interactive = options.get("interactive")
if self.interactive:
answer = choice_dialog("This command |
youtube/cobalt | third_party/devtools/scripts/build/dependency_preprocessor.py | Python | bsd-3-clause | 3,349 | 0.00209 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This ensures that each front-end module does not accidentally rely on a module
that isn't listed as a transitive dependency in the module.json.
How this works:
1. Renames any potential undeclared namespace usage across the entire front-end code
(e.g. identifiers, strings) into e.g. "$$UndeclaredDependency_SDK$$.Foo".
2. Closure Compiler catches any illegal usage and safely ignores coincidental
usages (e.g. "Console.Foo" in a string).
"""
import codecs
import multiprocessing
from os import path
import re
import shutil
import special_case_namespaces
try:
import simplejson as json
except ImportError:
import json
class DependencyPreprocessor(object):
def __init__(self, descriptors, temp_frontend_path, devtools_frontend_path):
self.descriptors = descriptors
self.temp_frontend_path = temp_frontend_path
self.module_descript | ors = descriptors.modules
self.modules = set(self.descriptors.sorted_modules())
shutil.copytree(devtools_frontend_path, self.temp_frontend_path)
self._special_case_namespaces = special_case_namespaces.special_case_namespaces
def enforce_dependencies(self):
arg_list = []
for module in self.modules:
dependencies = set(self.descri | ptors.sorted_dependencies_closure(module))
excluded_modules = self.modules - {module} - dependencies
excluded_namespaces = [self._map_module_to_namespace(m) for m in excluded_modules]
file_paths = [
path.join(self.temp_frontend_path, module, file_name)
for file_name in self.descriptors.module_compiled_files(module)
]
arg = {
'excluded_namespaces': excluded_namespaces,
'file_paths': file_paths,
}
arg_list.append(arg)
parallelize(poison_module, arg_list)
def _map_module_to_namespace(self, module):
return self._special_case_namespaces.get(module, self._to_camel_case(module))
def _to_camel_case(self, snake_string):
components = snake_string.split('_')
return ''.join(x.title() for x in components)
def poison_module(target):
excluded_namespaces = target['excluded_namespaces']
file_paths = target['file_paths']
for file_path in file_paths:
with codecs.open(file_path, 'r', 'utf-8') as file:
file_contents = file.read()
file_contents = poison_contents_for_namespaces(file_contents, excluded_namespaces)
with codecs.open(file_path, 'w', 'utf-8') as file:
file.write(file_contents)
def poison_contents_for_namespaces(file_contents, namespaces):
# Technically, should be [^.]\s*\b + NAMESPACES + \b\s*[^:]
# but we rely on clang-format to format like this:
# SomeModule
# .Console
regex = r'([^.]\b)(' + '|'.join(namespaces) + r')(\b[^:])'
replace = r'\1$$UndeclaredDependency_\2$$\3'
return re.sub(regex, replace, file_contents)
def parallelize(fn, arg_list):
number_of_processes = min(multiprocessing.cpu_count(), 8)
pool = multiprocessing.Pool(number_of_processes)
pool.map(fn, arg_list)
pool.close()
pool.join()
|
bmcfee/mir_eval | evaluators/melody_eval.py | Python | mit | 2,538 | 0 | #!/usr/bin/env python
'''
CREATED:2014-03-18 by Justin Salamon <justin.salamon@nyu.edu>
Compute melody extraction evaluation measures
Usage:
./melody_eval.py TRUTH.TXT PREDICTION.TXT
(CSV files also accepted)
For a detailed explanation of the measures please refer to:
J. Salamon, E. Gomez, D. P. W. Ellis and G. Richard, "Melody Extraction
from Polyphonic Music Signals: Approaches, Applications and Challenges",
IEEE Signal Processing Magazine, 31(2):118-134, Mar. 2014.
'''
from __future__ import print_function
import argparse
import sys
import os
import eval_utilities
import mir_eval
def process_arguments():
'''Argparse function to get the program parameters'''
parser = argparse.ArgumentParser(description='mir_eval melody extraction '
'evaluation')
parser.add_argument('-o',
dest='output_file',
default=None,
type=str,
action='store',
help='Store results in json format')
parser.add_argument('reference_file',
action='store',
help='path to the ground truth annotation')
parser.add_argument('estimated_file',
action='store',
help='path to the estimation file')
parser.add_argument("--hop",
dest='hop',
type=float,
default=None,
help="hop size (in seconds) to use for the evaluation"
" (optional)")
return vars(parser.parse_args(sys.argv[1:]))
if __name__ == '__main__':
# Get the parameters
parameters = process_arguments()
# Load in the data from the provided files
(ref_time,
ref_freq) = mir_eval.io.load_time_series(parameters['reference_file'])
(est_time,
est_freq) = mir_eval.io.load_time_series(parameters['estimated_file'] | )
# Compute all the scores
scores = mir_eval.melody.evaluate(ref_time, ref_freq, est_time, est_freq,
hop=parameters['hop'])
print("{} vs | . {}".format(os.path.basename(parameters['reference_file']),
os.path.basename(parameters['estimated_file'])))
eval_utilities.print_evaluation(scores)
if parameters['output_file']:
print('Saving results to: ', parameters['output_file'])
eval_utilities.save_results(scores, parameters['output_file'])
|
xtacocorex/chip-python-aREST | examples/chip-arest-cloud.py | Python | mit | 734 | 0.013624 | #!/usr/bin/env python
# Copyright (c) 2017 Robert Wolterman
# Basic example using the CHIP aREST API
# Module Imports
import CHIP_aREST.aREST as aREST
# Setup the Device Info
aREST.set_id(aREST.make_id(6))
# or
#aREST.set_id("Xy56E1")
aREST.set_name("MY_CLOUDED_CHIP")
# In the future, this would be used to | different | iate a CHIP and CHIP Pro
aREST.set_hardware("chip")
# Variable Examples
temperature = 21.2
humidity = 95.2
aREST.variable("temperature",temperature)
aREST.variable("humidity",humidity)
# Function Example
def myfunction():
return "hello from myfunction cloud!"
aREST.function("functiontest",myfunction)
# Connect to the aRest.io
#aREST.connect()
# Start
aREST.RestApp(host="0.0.0.0",port=3000,debug=True)
|
D4wN/brickv | src/build_data/windows/OpenGL/GL/NV/half_float.py | Python | gpl-2.0 | 1,383 | 0.016631 | '''OpenGL extension NV.half_float
This module customises the behaviour of the
OpenGL.raw.GL.NV.half_float to provide a more
Python-friendly API
Overview (from the spec)
This extension introduces a new storage format and data type for
half-precision (16-bit) floating-point quantities. The floating-point
format is very similar to the IEEE single-precision floating-point
standard, except that it has only 5 exponent bits and 10 mantissa bits.
Half-precision floats are smaller than full precision floats and provide a
larger dynamic range than similarly-sized normalized scalar data types.
This extension allows applications to use half-precision floating point
data when specifying vertices or pixel data. It adds new commands to
specify vertex attributes using the new data type, and extends the
existing vertex array and image specification commands to accept the new
data type.
This storage format is also used to represent 16-bit components in the
floating-point frame buffers, as defined in the NV_float_buffer extension.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/half_float.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extension | s, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.half_float import *
### END AUTOGENERATED SECTION | |
douwevandermeij/cmsplugin-filer | cmsplugin_filer_folder/cms_plugins.py | Python | bsd-3-clause | 2,258 | 0.000886 | from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.template import loader
from django.template.loader import select_template
from django.utils.translation import ugettext_lazy as _
from . import models
from .conf import settings
from filer.models.imagemodels import Image
class FilerFolderPlugin(CMSPluginBase):
module = 'Filer'
model = mod | els.FilerFolder
name = _("Folder")
TEMPLATE_NAME = 'cmsplugin_filer_folder/plugins/folder/%s.html'
render_template = TEMPLATE_NAME % 'default'
text_enabled = False
admin_preview = False
fieldsets = (
(None, {'fields': ['title', 'folder']}),
)
if settings.CMSPLUGIN_FILER_FOL | DER_STYLE_CHOICES:
fieldsets[0][1]['fields'].append('style')
def get_folder_files(self, folder, user):
qs_files = folder.files.filter(image__isnull=True)
if user.is_staff:
return qs_files
else:
return qs_files.filter(is_public=True)
def get_folder_images(self, folder, user):
qs_files = folder.files.instance_of(Image)
if user.is_staff:
return qs_files
else:
return qs_files.filter(is_public=True)
def get_children(self, folder):
return folder.get_children()
def render(self, context, instance, placeholder):
self.render_template = select_template((
'cmsplugin_filer_folder/folder.html', # backwards compatibility. deprecated!
self.TEMPLATE_NAME % instance.style,
self.TEMPLATE_NAME % 'default')
).template
folder_files = self.get_folder_files(instance.folder,
context['request'].user)
folder_images = self.get_folder_images(instance.folder,
context['request'].user)
folder_folders = self.get_children(instance.folder)
context.update({
'object': instance,
'folder_files': sorted(folder_files),
'folder_images': sorted(folder_images),
'folder_folders': folder_folders,
'placeholder': placeholder
})
return context
plugin_pool.register_plugin(FilerFolderPlugin)
|
martadesimone/Protoplanetarydisks | linmix/linmix/__init__.py | Python | gpl-2.0 | 117 | 0.008547 | """ A hierarchical Bayesian approach to linea | r regression with | error in both X and Y.
"""
from linmix import LinMix
|
flp9001/vila-paraiso | vilaparaiso/vilaparaiso/settings.py | Python | gpl-3.0 | 5,294 | 0.003967 | """
Django settings for vilaparaiso project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = environ.Path(__file__) - 2 # (vilaparaiso/vilaparaiso/settings.py - 2 = vilaparaiso/)
APPS_DIR = ROOT_DIR.path('vilaparaiso')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'rs6+(drm2ngd-b=tpu#fkpqyixcew-!7y*z9yj&vgy^f$#keo='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['.vilaparaiso.org', '192.241.215.206', '127.0.0.1']
# Application definition
DJANGO_APPS = [
'jet', # http://jet.readthedocs.io/
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'mptt',
'cities_light',
'django_extensions',
]
# Apps specific for this project go here.
LOCAL_APPS = [
'website.apps.WebsiteConfig',
'cities.apps.CitiesConfig', # custom cities app
'users.apps.UsersConfig',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vilaparaiso.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(APPS_DIR.path('templates')),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vilaparaiso.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
#}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'vilaparaiso',
'USER': 'django',
'PASSWORD': 'xuxubeleza',
'HOST': 'localhost',
'PORT': '',
}
}
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'vilaparaiso.sqlite3'),
# }
#}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
#STATICFILES_DIRS = [
# os.path.join(BASE_DIR, "static"),
#]
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
AU | TH_USER_MODEL = 'users.User'
#CITIES_LIGHT
#http://download.geonames.org/export/dump/iso-languagecodes.txt
CITIES_LIGHT_APP_NAME = 'cities'
CITIES_LIGHT_TRANSLATION_LA | NGUAGES = ['en', 'es', 'pt', 'abbr']
#CITIES_LIGHT_INCLUDE_COUNTRIES = ['BR']
JET_THEMES = [
{
'theme': 'default', # theme folder name
'color': '#47bac1', # color of the theme's button in user menu
'title': 'Default' # theme title
},
{
'theme': 'green',
'color': '#44b78b',
'title': 'Green'
},
{
'theme': 'light-green',
'color': '#2faa60',
'title': 'Light Green'
},
{
'theme': 'light-violet',
'color': '#a464c4',
'title': 'Light Violet'
},
{
'theme': 'light-blue',
'color': '#5EADDE',
'title': 'Light Blue'
},
{
'theme': 'light-gray',
'color': '#222',
'title': 'Light Gray'
}
]
JET_DEFAULT_THEME = 'light-violet'
|
suutari/shoop | shuup_tests/utils/basketish_order_source.py | Python | agpl-3.0 | 355 | 0 | # -*- coding: utf-8 -*-
# This file is par | t of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tr | ee.
from shuup.core.order_creator import OrderSource
class BasketishOrderSource(OrderSource):
pass
|
venicegeo/eventkit-cloud | scripts/thematic_test.py | Python | bsd-3-clause | 5,667 | 0.004764 | """
Harness for running thematic transformations against a spatialite db.
From the project directory run:
./manage.py runscript thematic_test --settings=hot_exports.settings -v2
Depends on django-extensions.
"""
import logging
import os
import shutil
from string import Template
import sqlite3
from eventkit_cloud.jobs.models import Job
logger = logging.getLogger(__name__)
thematic_spec = {
'amenities_all_points': {'type': 'point', 'key': 'amenity', 'table': 'planet_osm_point', 'select_clause': 'amenity is not null'},
'amenities_all_polygons': {'type': 'polygon', 'key': 'amenity', 'table': 'planet_osm_polygon', 'select_clause': 'amenity is not null'},
'health_schools_points': {'type': 'point', 'key': 'amenity', 'table': 'planet_osm_point', 'select_clause': 'amenity="clinic" OR amenity="hospital" OR amenity="school" OR amenity="pharmacy"'},
'health_schools_polygons': {'key': 'amenity', 'table': 'planet_osm_polygon', 'select_clause': 'amenity="clinic" OR amenity="hospital" OR amenity="school" OR amenity="pharmacy"'},
'airports_all_points': {'key': 'aeroway', 'table': 'planet_osm_point', 'select_clause': 'aeroway is not null'},
'airports_all_polygons': {'key': 'aeroway', 'table': 'planet_osm_polygon', 'select_clause': 'aeroway is not null'},
'villages_points': {'key': 'place', 'table': 'planet_osm_point', 'select_clause': 'place is not null'},
'buildings_polygons': {'key': 'building', 'table': 'planet_osm_polygon', 'select_clause': 'building is not null'},
'natural_polygons': {'key': 'natural', 'table': 'planet_osm_polygon', 'select_clause': 'natural is not null'},
'natural_lines': {'key': 'natural', 'table': 'planet_osm_line', 'select_clause': 'natural is not null'},
'landuse_other_polygons': {'key': 'landuse', 'table': 'planet_osm_polygon', 'select_clause': 'landuse is not null AND landuse!="residential"'},
'landuse_residential_polygons': {'key': 'landuse', 'table': 'planet_osm_polygon', 'select_clause': 'landuse is not null AND landuse="residential"'},
'roads_paths_lines': {'key': 'highway', 'table': 'planet_osm_line', 'select_clause': 'highway is not null'},
'waterways_lines': {'key': 'waterway', 'table': 'planet_osm_line', 'select_clause': 'waterway is not null'},
'towers_antennas_points': {'key': 'man_made', 'table': 'planet_osm_point', 'select_clause': 'man_made="antenna" OR man_made="mast" OR man_made="tower"'},
'harbours_points': {'key': 'harbour', 'table': 'planet_osm_point', 'select_clause': 'harbour is not null'},
'grassy_fields_polygons': {'key': 'leisure', 'table': 'planet_osm_polygon', 'select_clause': 'leisure="pitch" OR leisure="common" OR leisure="golf_course"'},
}
def run(*script_args):
sqlite = '/home/ubuntu/export_downloads/5c095634-1591-4f31-aa75-b6a7952b29e9/query.sqlite'
thematic = '/home/ubuntu/export_downloads/5c095634-1591-4f31-aa75-b6a7952b29e9/thematic.sqlite'
# get the job tags
job = Job.objects.get(uid='0a835fe4-1fbc-43ad-ab37-940fab415085')
tags = job.categorised_tags
# create the thematic sqlite file (a copy of the original)
thematic_sqlite = shutil.copy(sqlite, thematic)
assert os.path.exists(thematic), 'Thematic sqlite file not found.'
# setup sqlite connection
conn = sqlite3.connect(thematic)
# load spatialite extension
conn.enable_load_extension(True)
cmd = "SELECT load_extension('libspatialite')"
cur = conn.cursor()
cur.execute(cmd)
geom_types = {'points': 'POINT', 'lines': 'LINESTRING', 'polygons': 'MULTIPOLYGON'}
# create and execute thematic sql statements
sql_tmpl = Template('CREATE TABLE $tablename AS SELECT osm_id, $osm_way_id $columns, Geometry FROM $planet_table WHERE $select_clause')
recover_geom_tmpl = Template("SELECT RecoverGeometryColumn($tablename, 'GEOMETRY', 4326, $geom_type, 'XY')")
for layer, spec in thematic_spec.items():
layer_type = layer.split('_')[-1]
isPoly = layer_type == 'polygons'
osm_way_id = ''
# check if the thematic tag is in the jobs tags, if not skip this thematic layer
if not spec['key'] in tags[layer_type]:
continue
if isPoly:
osm_way_id = 'osm_way_id,'
params = {'tablename': layer, 'osm_way_id': osm_way_id,
'columns': ', '.join(tags[layer_type]),
'planet_table': spec['table'], 'select_clause': spec['select_clause']}
sql = sql_tmpl.safe_substitute(params)
print(sql)
cur.execute(sql)
geom_type = geom_types[layer_type]
recover_geom_sql = recover_geom_tmpl.safe_substitute({'tablename': "'" + layer + "'", 'geom_type': "'" + geom_type + "'"})
print(recover_geom_sql)
conn.commit()
cur.execute(recover_geom_sql)
cur.execute("SELECT CreateSpatialIndex({0}, 'GEOMETRY')".format("'" + layer + "'"))
conn.commit()
# remove existing geometry columns
cur.execute("SELECT DiscardGeometryColumn('planet_osm_point','Geometry')")
cur.execute("SELECT DiscardGeometryColumn('planet_osm_line','Geometry')")
cur.execute("SELECT DiscardGeometryColumn('planet_osm_polygon','Geometry')")
conn.commit()
# drop existing spatial indexes
cur.execute('DROP TABLE idx_ | planet_osm_point_GEOMETRY')
cur.execute('DROP TABLE idx_planet_osm_line_GEOMETRY')
cur.execute('DROP TABLE idx_planet_osm_polygon_GEOMETRY')
conn.commit()
# drop default schema tables
cur.execute('DROP TABLE planet_osm_point')
cur.execute('DROP TABLE planet_osm_line')
| cur.execute('DROP TABLE planet_osm_polygon')
conn.commit()
cur.close()
|
kupiakos/LapisMirror | plugins/e621.py | Python | mit | 4,898 | 0.000612 | # The MIT License (MIT)
# Copyright (c) 2016 HeyItsShuga
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import re
import html
from urllib.parse import urlsplit
import traceback
import requests
import mimeparse
import praw
class E621Plugin:
"""
Mirrors e621 images using either their API or using their CDN links.
Created by /u/HeyItsShuga
"""
def __init__(self, useragent: str, **options):
"""Initialize the e621 importer.
:param useragent: The useragent to use for querying e621.
:param options: Other options in the configuration. Ignored.
"""
self.log = logging.getLogger('lapis.e621')
self.headers = {'User-Agent': useragent}
self.regex = re.compile(
r'^https?://(((?:www\.)?(?:static1\.)?'
r'(?P<service>(e621)|(e926))\.net/(data/.+/(?P<md5>\w+))?'
r'(post/show/(?P<post_id>\d+)/?)?.*))$')
def import_submission(self, submission: praw.objects.Submission) -> dict:
"""Import a submission from e621.
This function will define the following values in its return data:
- author: simply "an anonymous user on e621"
- source: The url of the submission
- importer_display/header
- import_urls
After we define that, we need to get the image. Since e621 has an
API, we use that to try to get the image if the image is a non-CDN URL.
If it is a CDN URL, we take the image directory and upload *that* to
Imgur.
image_url is the variable of the image to upload.
:param submission: A reddit submission to parse.
"""
try:
url = html.unescape(submission.url)
match = self.regex.match(submission.url)
if not match:
return None
r = requests.head(url, headers=self.headers)
mime_text = r.headers.get('Content-Type')
mime = mimeparse.parse_mime_type(mime_text)
if mime[0] == 'image':
md5 = match.group('md5')
service = match.group('service')
endpoint = 'http://e926.net/post/check_md5.json?md5=' + md5
self.log.debug('Will use MD5 checker endpoint at %s', endpoint)
callapi = requests.get(endpoint)
json = callapi.json()
post_id = json['post_id']
post_id = str(post_id)
else:
self.log.debug('No CDN used, md5 retrieval not neccesary.')
# For non-CDN links, the plugin attempts to get the post_id
# out of the URL using regex.
post_id = match.group('post_id')
endpoint = 'http://e926.net/post/show.json?id=' + post_id
service = match.group('service')
self.log.debug('Will use API endpoint at %s', endpoint)
# We will use the e621 API to get the image URL.
callapi = requests.get(endpoint)
json = callapi.json()
img = json['file_url']
author = json['artist']
author = ''.join(author) # Converts the list into a string to be used later.
data = {'author': author,
'source': url,
'importer_display':
{'header': 'Mirrored [image](https://' + service + '.net/post/show/' + post_id + ') \
by ' + service + ' artist [' + author + '](https://' + service + '.net/post/index/1/' + author + '\
):\n\n'}}
image_url = img
data['import_urls'] = [image_url]
return data
except Exception:
| self.log.error('Could not import e621 URL %s (%s)',
submission.url, tr | aceback.format_exc())
return None
__plugin__ = E621Plugin
# END OF LINE.
|
xbmc/atv2 | xbmc/lib/libPython/Python/Mac/scripts/buildpkg.py | Python | gpl-2.0 | 15,904 | 0.001572 | #!/usr/bin/env python
"""buildpkg.py -- Build OS X packages for Apple's Installer.app.
This is an experimental command-line tool for building packages to be
installed with the Mac OS X Installer.app application.
It is much inspired by Apple's GUI tool called PackageMaker.app, that
seems to be part of the OS X developer tools installed in the folder
/Developer/Applications. But apparently there are other free tools to
do the same thing which are also named PackageMaker like Brian Hill's
one:
http://personalpages.tds.net/~brian_hill/packagemaker.html
Beware of the multi-package features of Installer.app (which are not
yet supported here) that can potentially screw-up your installation
and are discussed in these articles on Stepwise:
http://www.stepwise.com/Articles/Technical/Packages/InstallerWoes.html
http://www.stepwise.com/Articles/Technical/Packages/InstallerOnX.html
Beside using the PackageMaker class directly, by importing it inside
another module, say, there are additional ways of using this module:
the top-level buildPackage() function provides a shortcut to the same
feature and is also called when using this module from the command-
line.
****************************************************************
NOTE: For now you should be able to run this even on a non-OS X
system and get something similar to a package, but without
the real archive (needs pax) and bom files (needs mkbom)
inside! This is only for providing a chance for testing to
folks without OS X.
****************************************************************
TODO:
- test pre-process and post-process scripts (Python ones?)
- handle multi-volume packages (?)
- integrate into distutils (?)
Dinu C. Gherman,
gherman@europemail.com
November 2001
!! USE AT YOUR OWN RISK !!
"""
__version__ = 0.2
__license__ = "FreeBSD"
import os, sys, glob, fnmatch, shutil, string, copy, getopt
from os.path import basename, dirname, join, islink, isdir, isfile
Error = "buildpkg.Error"
PKG_INFO_FIELDS = """\
Title
Version
Description
DefaultLocation
DeleteWarning
NeedsAuthorization
DisableStop
UseUserMask
Application
Relocatable
Required
InstallOnly
RequiresReboot
RootVolumeOnly
LongFilenames
LibrarySubdirectory
AllowBackRev
OverwritePermissions
InstallFat\
"""
######################################################################
# Helpers
######################################################################
# Convenience class, as suggested by /F.
class GlobDirectoryWalker:
"A forward iterator that traverses files in a directory tree."
def __init__(self, directory, pattern="*"):
self.stack = [directory]
self.pattern = pattern
self.files = []
self.index = 0
def __getitem__(self, index):
while 1:
try:
file = self.files[self.index]
self.index = self.index + 1
except IndexError:
# pop next directory from stack
self.directory = self.stack.pop()
self.files = os.listdir(self.directory)
self.index = 0
else:
# got a filename
fullname = join(self.directory, file)
if isdir(fullname) and not islink(fullname):
self.stack.append(fullname)
if fnmatch.fnmatch(file, self.pattern):
return fullname
######################################################################
# The real thing
######################################################################
class PackageMaker:
"""A class to generate packages for Mac OS X.
This is intended to create OS X packages (with extension .pkg)
containing archives of arbitrary files that the Installer.app
will be able to handle.
As of now, PackageMaker instances need to be created with the
title, version and description of the package to be built.
The package is built after calling the instance method
build(root, **options). It has the same name as the constructor's
title argument plus a '.pkg' extension and is located in the same
parent folder that contains the root folder.
E.g. this will create a package folder /my/space/distutils.pkg/:
pm = PackageMaker("distutils", "1.0.2", "Python distutils.")
pm.build("/my/space/distutils")
"""
packageInfoDefaults = {
'Title': None,
'Version': None,
'Description': '',
'DefaultLocation': '/',
'DeleteWarning': '',
'NeedsAuthorization': 'NO',
'DisableStop': 'NO',
'UseUserMask': 'YES',
'Application': 'NO',
'Relocatable': 'YES',
'Required': 'NO',
'InstallOnly': 'NO',
'RequiresReboot': 'NO',
'RootVolumeOnly' : 'NO',
'InstallFat': 'NO',
'LongFilenames': 'YES',
'LibrarySubdirectory': 'Standard',
'AllowBackRev': 'YES',
'OverwritePermissions': 'NO',
}
def __init__(self, title, version, desc):
"Init. with mandatory title/version/description arguments."
info = {"Title": title, "Version": version, "Description": desc}
self.packageInfo = copy.deepcopy(self.packageInfoDefaults)
self.packageInfo.update(info)
# variab | les set later
self.packageRootFolder = None
self.packageResourceFolder = None
self.sourceFolder = None
self.resourceFolder = None
def build(self, root, resources=Non | e, **options):
"""Create a package for some given root folder.
With no 'resources' argument set it is assumed to be the same
as the root directory. Option items replace the default ones
in the package info.
"""
# set folder attributes
self.sourceFolder = root
if resources == None:
self.resourceFolder = root
else:
self.resourceFolder = resources
# replace default option settings with user ones if provided
fields = self. packageInfoDefaults.keys()
for k, v in options.items():
if k in fields:
self.packageInfo[k] = v
elif not k in ["OutputDir"]:
raise Error, "Unknown package option: %s" % k
# Check where we should leave the output. Default is current directory
outputdir = options.get("OutputDir", os.getcwd())
packageName = self.packageInfo["Title"]
self.PackageRootFolder = os.path.join(outputdir, packageName + ".pkg")
# do what needs to be done
self._makeFolders()
self._addInfo()
self._addBom()
self._addArchive()
self._addResources()
self._addSizes()
self._addLoc()
def _makeFolders(self):
"Create package folder structure."
# Not sure if the package name should contain the version or not...
# packageName = "%s-%s" % (self.packageInfo["Title"],
# self.packageInfo["Version"]) # ??
contFolder = join(self.PackageRootFolder, "Contents")
self.packageResourceFolder = join(contFolder, "Resources")
os.mkdir(self.PackageRootFolder)
os.mkdir(contFolder)
os.mkdir(self.packageResourceFolder)
def _addInfo(self):
"Write .info file containing installing options."
# Not sure if options in PKG_INFO_FIELDS are complete...
info = ""
for f in string.split(PKG_INFO_FIELDS, "\n"):
if self.packageInfo.has_key(f):
info = info + "%s %%(%s)s\n" % (f, f)
info = info % self.packageInfo
base = self.packageInfo["Title"] + ".info"
path = join(self.packageResourceFolder, base)
f = open(path, "w")
f.write(info)
def _addBom(self):
"Write .bom file containing 'Bill of Materials'."
# Currently ignores if the 'mkbom' tool is not available.
try:
base = self.packageInfo["Title"] + ".bom"
bomPath = join(self.packageResourceFolder, base)
cmd = "mkbom %s %s" % (self.sourceFolder, bomPat |
GeosoftInc/gxpy | geosoft/gxapi/GXDBWRITE.py | Python | bsd-2-clause | 11,130 | 0.007907 | ### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
from .GXDB import GXDB
from .GXVA import GXVA
from .GXVV import GXVV
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXDBWRITE(gxapi_cy.WrapDBWRITE):
"""
GXDBWRITE class.
The `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` class is used to open and write to databases. Large blocks of data
are split into blocks and served up sequentially to prevent the over-use of virtual memory when VVs or VAs are being written to channels.
Individual data blocks are limited by default to 1 MB (which is user-alterable). Data less than the block size
are served up whole, one block per line.
"""
def __init__(self, handle=0):
super(GXDBWRITE, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXDBWRITE <geosoft.gxapi.GXDBWRITE>`
:returns: A null `GXDBWRITE <geosoft.gxapi.GXDBWRITE>`
:rtype: GXDBWRITE
"""
return GXDBWRITE()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Create Methods
@classmethod
def create(cls, db):
"""
Create a `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object
Add channels using the `add_channel <geosoft.gxapi.GXDBWRITE.add_channel>` method.channel.
:param db: Database input
:type db: GXDB
:returns: `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object
:rtype: GXDBWRITE
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDBWRITE._create(GXContext._get_tls_geo(), | db)
return GXDBWRITE(ret_val)
@classmethod
def create_xy(cls, db):
"""
Create a `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object for a XY-located data. Add channels using the
`add_channel <geosoft.gxapi.GXDBWRITE.add_channel>` method.
:param db: Database input
:type db: GXDB
:returns: `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object
:rtype: GXDBWRITE
.. versionadded:: 9.0
**License:** | `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDBWRITE._create_xy(GXContext._get_tls_geo(), db)
return GXDBWRITE(ret_val)
@classmethod
def create_xyz(cls, db):
"""
Create a `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object for a XYZ-located data.
Add channels using the `add_channel <geosoft.gxapi.GXDBWRITE.add_channel>` method.channel
:param db: Database input
:type db: GXDB
:returns: `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object
:rtype: GXDBWRITE
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDBWRITE._create_xyz(GXContext._get_tls_geo(), db)
return GXDBWRITE(ret_val)
def add_channel(self, chan):
"""
Add a data channel to the `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object.
:param chan: Channel handle (does not need to be locked, but can be.)
:type chan: int
:returns: Channel index. Use for getting the correct `GXVV <geosoft.gxapi.GXVV>` or `GXVA <geosoft.gxapi.GXVA>` object.
:rtype: int
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._add_channel(chan)
return ret_val
# Data Access Methods
def get_db(self):
"""
Get the output `GXDB <geosoft.gxapi.GXDB>` handle from the `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object.
:returns: `GXDB <geosoft.gxapi.GXDB>` handle
:rtype: GXDB
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._get_db()
return GXDB(ret_val)
def get_vv(self, chan):
"""
Get the `GXVV <geosoft.gxapi.GXVV>` handle for a channel.
:param chan: Index of channel to access.
:type chan: int
:returns: `GXVV <geosoft.gxapi.GXVV>` handle
:rtype: GXVV
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Call only for single-column (regular) channels. You can call the `get_chan_array_size <geosoft.gxapi.GXDBWRITE.get_chan_array_size>`
function to find the number fo columns in a given channel. The `GXVV <geosoft.gxapi.GXVV>` is filled anew for each block served up.
"""
ret_val = self._get_vv(chan)
return GXVV(ret_val)
def get_va(self, chan):
"""
Get the `GXVA <geosoft.gxapi.GXVA>` handle for an array channel.
:param chan: Index of channel to access.
:type chan: int
:returns: `GXVA <geosoft.gxapi.GXVA>` handle
:rtype: GXVA
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Call only for array (multi-column) channels. You can call the `get_chan_array_size <geosoft.gxapi.GXDBWRITE.get_chan_array_size>`
function to find the number fo columns in a given channel, or you can call `GXVA.col <geosoft.gxapi.GXVA.col>` on the returned `GXVA <geosoft.gxapi.GXVA>` handle.
The `GXVA <geosoft.gxapi.GXVA>` is filled anew for each block served up.
"""
ret_val = self._get_va(chan)
return GXVA(ret_val)
def get_v_vx(self):
"""
Get the X channel `GXVV <geosoft.gxapi.GXVV>` handle.
:returns: `GXVV <geosoft.gxapi.GXVV>` handle
:rtype: GXVV
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Only available for the CreateXY or CreateXYZ methods.
The `GXVV <geosoft.gxapi.GXVV>` is filled anew for each block served up.
"""
ret_val = self._get_v_vx()
return GXVV(ret_val)
def get_v_vy(self):
"""
Get the Y channel `GXVV <geosoft.gxapi.GXVV>` handle.
:returns: `GXVV <geosoft.gxapi.GXVV>` handle
:rtype: GXVV
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Only available for the CreateXY or CreateXYZ methods.
The `GXVV <geosoft.gxapi.GXVV>` is filled anew for each block served up.
"""
ret_val = self._get_v_vy()
return GXVV(ret_val)
def get_v_vz(self):
"""
Get the Z channel `GXVV <geosoft.gxapi.GXVV>` handle.
:returns: `GXVV <geosoft.gxapi.GXVV>` handle
:rtype: GXVV
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net |
szha/mxnet | python/mxnet/gluon/model_zoo/model_store.py | Python | apache-2.0 | 6,036 | 0.00116 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Model zoo for pre-trained models."""
__all__ = ['get_model_file', 'purge']
import os
import zipfile
import logging
import tempfile
import uuid
import shutil
from ..utils import download, check_sha1, replace_file
from ... import base
_model_sha1 = {name: checksum for checksum, name in [
('44335d1f0046b328243b32a26a4fbd62d9057b45', 'alexnet'),
('f27dbf2dbd5ce9a80b102d89c7483342cd33cb31', 'densenet121'),
('b6c8a95717e3e761bd88d145f4d0a214aaa515dc', 'densenet161'),
('2603f878403c6aa5a71a124c4a3307143d6820e9', 'densenet169'),
('1cdbc116bc3a1b65832b18cf53e1cb8e7da017eb', 'densenet201'),
('ed47ec45a937b656fcc94dabde85495bbef5ba1f', 'inceptionv3'),
('9f83e440996887baf91a6aff1cccc1c903a64274', 'mobilenet0.25'),
('8e9d539cc66aa5efa71c4b6af983b936ab8701c3', 'mobilenet0.5'),
('529b2c7f4934e6cb851155b22c96c9ab0a7c4dc2', 'mobilenet0.75'),
('6b8c5106c730e8750bcd82ceb75220a3351157cd', 'mobilenet1.0'),
('36da4ff1867abccd32b29592d79fc753bca5a215', 'mobilenetv2_1.0'),
('e2be7b72a79fe4a750d1dd415afedf01c3ea818d', 'mobilenetv2_0.75'),
('aabd26cd335379fcb72ae6c8fac45a70eab11785', 'mobilenetv2_0.5'),
('ae8f9392789b04822cbb1d98c27283fc5f8aa0a7', 'mobilenetv2_0.25'),
('a0666292f0a30ff61f857b0b66efc0228eb6a54b', 'resnet18_v1'),
('48216ba99a8b1005d75c0f3a0c422301a0473233', 'resnet34_v1'),
('0aee57f96768c0a2d5b23a6ec91eb08dfb0a45ce', 'resnet50_v1'),
('d988c13d6159779e907140a638c56f229634cb02', 'resnet101_v1'),
('671c637a14387ab9e2654eafd0d493d86b1c8579', 'resnet152_v1'),
('a81db45fd7b7a2d12ab97cd88ef0a5ac48b8f657', 'resnet18_v2'),
('9d6b80bbc35169de6b6edecffdd6047c56fdd322', 'resnet34_v2'),
('ecdde35339c1aadbec4f547857078e734a76fb49', 'resnet50_v2'),
('18e93e4f48947e002547f50eabbcc9c83e516aa6', 'resnet101_v2'),
('f2695542de38cf7e71ed58f02893d82bb409415e', 'resnet152_v2'),
('264ba4970a0cc87a4f15c96e25246a1307caf523', 'squeezenet1.0'),
('33ba0f93753c83d86e1eb397f38a667eaf2e9376', 'squeezenet1.1'),
('dd221b160977f36a53f464cb54648d227c707a05', 'vgg11'),
('ee79a8098a91fbe05b7a973fed2017a6117723a8', 'vgg11_bn'),
('6bc5de58a05a5e2e7f493e2d75a580d83efde38c', 'vgg13'),
('7d97a06c3c7a1aecc88b6e7385c2b373a249e95e', 'vgg13_bn'),
('e660d4569ccb679ec68f1fd3cce07a387252a90a', 'vgg16'),
('7f01cf050d357127a73826045c245041b0df7363', 'vgg16_bn'),
('ad2f660d101905472b83590b59708b71ea22b2e5', 'vgg19'),
('f360b758e856f1074a85abd5fd873ed1d98297c3', 'vgg19_bn')]}
apache_repo_url = 'https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/'
_url_format = '{repo_url}gluon/models/{file_name}.zip'
def short_hash(name):
    """Return the first 8 hex characters of the registered SHA-1 for *name*.

    Raises ValueError when no pretrained weights are registered for the model.
    """
    try:
        full_sha1 = _model_sha1[name]
    except KeyError:
        raise ValueError(
            'Pretrained model for {name} is not available.'.format(name=name))
    return full_sha1[:8]
def get_model_file(name, root=os.path.join(base.data_dir(), 'models')):
    r"""Return the local path of the pretrained parameter file for ``name``.

    This function downloads from the online model zoo when the model cannot
    be found locally or its checksum mismatches. The root directory will be
    created if it doesn't exist.

    Parameters
    ----------
    name : str
        Name of the model.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.

    Returns
    -------
    file_path
        Path to the requested pretrained model file.
    """
    file_name = '{name}-{short_hash}'.format(name=name,
                                             short_hash=short_hash(name))
    root = os.path.expanduser(root)
    file_path = os.path.join(root, file_name + '.params')
    sha1_hash = _model_sha1[name]
    if os.path.exists(file_path):
        if check_sha1(file_path, sha1_hash):
            return file_path
        logging.warning('Mismatch in the content of model file detected. Downloading again.')
    else:
        logging.info('Model file not found. Downloading to %s.', file_path)
    os.makedirs(root, exist_ok=True)
    repo_url = os.environ.get('MXNET_GLUON_REPO', apache_repo_url)
    if not repo_url.endswith('/'):
        repo_url = repo_url + '/'
    # Unique suffix so concurrent downloads of the same model don't clobber
    # each other's temp files.
    random_uuid = str(uuid.uuid4())
    temp_zip_file_path = os.path.join(root, file_name + '.zip' + random_uuid)
    download(_url_format.format(repo_url=repo_url, file_name=file_name),
             path=temp_zip_file_path, overwrite=True)
    temp_dir = tempfile.mkdtemp(dir=root)
    try:
        with zipfile.ZipFile(temp_zip_file_path) as zf:
            zf.extractall(temp_dir)
        # Atomically move the extracted params file into its final location.
        replace_file(os.path.join(temp_dir, file_name + '.params'), file_path)
    finally:
        # Always clean up scratch files, even when extraction/replacement fails
        # (the original leaked both on error).
        shutil.rmtree(temp_dir, ignore_errors=True)
        if os.path.exists(temp_zip_file_path):
            os.remove(temp_zip_file_path)
    if check_sha1(file_path, sha1_hash):
        return file_path
    raise ValueError('Downloaded file has different hash. Please try again.')
def purge(root=os.path.join(base.data_dir(), 'models')):
    r"""Delete every cached pretrained ``.params`` file under *root*.

    Parameters
    ----------
    root : str, default '$MXNET_HOME/models'
        Location for keeping the model parameters.
    """
    model_dir = os.path.expanduser(root)
    for entry in os.listdir(model_dir):
        if entry.endswith(".params"):
            os.remove(os.path.join(model_dir, entry))
|
prasanna08/oppia | extensions/answer_summarizers/models.py | Python | apache-2.0 | 14,017 | 0.000285 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for calculations to get interaction answer views.
Calculations are performed on recorded state answers.
NOTE TO DEVELOPERS: To specify calculations desired for an interaction named
<INTERACTION_NAME>, edit
extensions.interactions.<INTERACTION_NAME>.answer_visualizations
This is a list of visualizations, each of which is specified by a dict with keys
'id', 'options' and 'calculation_id'. An example for a single visualization and
calculation may look like this:
answer_visualizations = [{
'id': 'SortedTiles',
'options': {
'use_percentages': True,
'header': 'Pretty Tiles!',
},
'calculation_id': 'AnswerFrequencies',
}]
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import itertools
import operator
from core.domain import exp_domain
from core.domain import stats_domain
import feconf
import python_utils
import utils
# Every classification category that the answer-classification pipeline can
# assign to a submitted answer.
CLASSIFICATION_CATEGORIES = frozenset([
    exp_domain.EXPLICIT_CLASSIFICATION,
    exp_domain.TRAINING_DATA_CLASSIFICATION,
    exp_domain.STATISTICAL_CLASSIFICATION,
    exp_domain.DEFAULT_OUTCOME_CLASSIFICATION,
])
# Subset of categories treated as "unresolved" when computing unresolved
# answer frequencies (see _get_top_unresolved_answers_by_frequency below).
UNRESOLVED_ANSWER_CLASSIFICATION_CATEGORIES = frozenset([
    exp_domain.STATISTICAL_CLASSIFICATION,
    exp_domain.DEFAULT_OUTCOME_CLASSIFICATION,
])
class HashableAnswer(python_utils.OBJECT):
    """Wrapper that makes an arbitrary answer usable as a set member or a
    dict key by deriving a canonical hashable representation of it.
    """

    def __init__(self, answer):
        # Keep the raw answer around; hashing and equality use the canonical
        # hashable form computed by utils.get_hashable_value.
        self.answer = answer
        self.hashable_answer = utils.get_hashable_value(answer)

    def __hash__(self):
        return hash(self.hashable_answer)

    def __eq__(self, other):
        return (
            isinstance(other, HashableAnswer) and
            self.hashable_answer == other.hashable_answer)
def _get_top_answers_by_frequency(answers, limit=None):
    """Tally how often each answer occurs and return the *limit* most common
    ones as a stats_domain.AnswerFrequencyList (all of them when limit is
    None).

    This method is run from within the context of a MapReduce job.

    Args:
        answers: iterable(*). The collection of answers to be tallied.
        limit: int or None. The maximum number of answers to return. When
            None, all answers are returned.

    Returns:
        stats_domain.AnswerFrequencyList. A list of the top "limit" answers.
    """
    tally = utils.OrderedCounter(HashableAnswer(ans) for ans in answers)
    occurrences = []
    for wrapped_answer, count in tally.most_common(n=limit):
        occurrences.append(
            stats_domain.AnswerOccurrence(wrapped_answer.answer, count))
    return stats_domain.AnswerFrequencyList(occurrences)
def _get_top_unresolved_answers_by_frequency(
        answers_with_classification, limit=None):
    """Computes the list of unresolved answers by keeping track of their latest
    classification categorization and then computes the occurrences of each
    unresolved answer, keeping only limit answers, and returns an
    AnswerFrequencyList.

    This method is run from within the context of a MapReduce job.

    Args:
        answers_with_classification: iterable(*). The collection of answers
            with their corresponding classification categorization.
        limit: int or None. The maximum number of answers to return. When None,
            all answers are returned.

    Returns:
        stats_domain.AnswerFrequencyList. A list of the top "limit"
        unresolved answers.
    """
    classification_results_dict = {}
    # The list of answers is sorted according to the time of answer submission,
    # so this loop keeps the *most recent* classification categorization of
    # each answer while accumulating its total frequency. A single .get()
    # replaces the original's double dict lookup.
    for ans in answers_with_classification:
        key = HashableAnswer(ans['answer'])
        previous = classification_results_dict.get(key)
        frequency = previous['frequency'] if previous is not None else 0
        classification_results_dict[key] = {
            'classification_categorization': (
                ans['classification_categorization']),
            'frequency': frequency + 1
        }
    # Keep only the answers whose latest categorization marks them unresolved.
    unresolved_answers_with_frequency_list = [{
        'answer': ans.answer,
        'frequency': val['frequency']
    } for ans, val in classification_results_dict.items() if val[
        'classification_categorization'] in (
            UNRESOLVED_ANSWER_CLASSIFICATION_CATEGORIES)]
    unresolved_answers_with_frequency_list.sort(
        key=lambda x: x['frequency'], reverse=True)
    return stats_domain.AnswerFrequencyList([
        stats_domain.AnswerOccurrence(item['answer'], item['frequency'])
        for item in unresolved_answers_with_frequency_list[:limit]
    ])
class BaseCalculation(python_utils.OBJECT):
    """Superclass for all calculations used to generate interaction answer
    views.
    """

    @property
    def id(self):
        """str. The name of the concrete calculation class."""
        return type(self).__name__

    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Perform calculation on a single StateAnswers entity. This is run in
        the context of a batch MapReduce job.

        Subclasses must override this method.
        """
        raise NotImplementedError(
            'Subclasses of BaseCalculation should implement the '
            'calculate_from_state_answers_dict(state_answers_dict) method.')
class AnswerFrequencies(BaseCalculation):
    """Calculation for answers' frequencies (how often each answer was
    submitted).
    """

    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Compute the number of occurrences of each answer and wrap the
        result in a StateAnswersCalcOutput.

        This method is run from within the context of a MapReduce job.
        """
        submitted = state_answers_dict['submitted_answer_list']
        frequencies = _get_top_answers_by_frequency(
            answer_dict['answer'] for answer_dict in submitted)
        return stats_domain.StateAnswersCalcOutput(
            state_answers_dict['exploration_id'],
            state_answers_dict['exploration_version'],
            state_answers_dict['state_name'],
            state_answers_dict['interaction_id'],
            self.id,
            frequencies)
class Top5AnswerFrequencies(BaseCalculation):
    """Calculation for the top 5 answers, by frequency."""

    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Compute the five most frequently submitted answers and wrap them
        in a StateAnswersCalcOutput.

        This method is run from within the context of a MapReduce job.
        """
        submitted = state_answers_dict['submitted_answer_list']
        top_answers = _get_top_answers_by_frequency(
            (answer_dict['answer'] for answer_dict in submitted), limit=5)
        return stats_domain.StateAnswersCalcOutput(
            state_answers_dict['exploration_id'],
            state_answers_dict['exploration_version'],
            state_answers_dict['state_name'],
            state_answers_dict['interaction_id'],
            self.id,
            top_answers)
class Top10AnswerFrequencies(BaseCalculation):
"""Calculation for the top 10 answers, by frequency."""
def cal |
kashifpk/TA | fetchmail.py | Python | gpl-2.0 | 3,120 | 0.006731 | #!/usr/bin/python
import email
from email.parser import HeaderParser
import imaplib
import mimetypes
import getpass
import sys
import os
from config import email_id, assignments_folder
class Mailbox(object):
    """Thin wrapper around an IMAP4-over-SSL connection that fetches
    messages and saves their bodies/attachments to disk.

    NOTE(review): the byte/str handling here (``str(raw_email)``,
    ``email_text += part.get_payload(decode=True)``, writing ``email_text``
    in ``'wb'`` mode) only behaves correctly on Python 2 — confirm the
    target interpreter before reuse.
    """
    def __init__(self, email_id, password, server='imap.gmail.com'):
        # Log in immediately; the connection stays open for this object's
        # lifetime (there is no explicit logout/close).
        self.mbox = imaplib.IMAP4_SSL(server)
        self.mbox.login(email_id, password)
        self.header_parser = HeaderParser()
    def select(self, folder_name):
        # Make folder_name the currently selected IMAP mailbox.
        self.mbox.select(folder_name)
    def print_headers(self, msg_part):
        # Debug helper: dump a message part and its header key/value pairs.
        print(msg_part)
        for item in msg_part.items():
            print(item[0], item[1])
    def fetch_messages(self, folder, unread=True):
        """Fetch messages (unseen-only by default) from the selected mailbox
        and save each one into ``folder/<imap-uid>/``.
        """
        search_str = "ALL"
        if unread:
            search_str = "(UNSEEN)"
        result, data = self.mbox.uid('search', None, search_str)
        email_ids = data[0].split()
        i = 1  # NOTE(review): unused counter, kept as-is
        for email_id in email_ids:
            # Fetch the full raw RFC822 message for this UID.
            result, data = self.mbox.uid('fetch', email_id, '(RFC822)')
            raw_email = data[0][1]
            email_message = email.message_from_string(str(raw_email))
            headers = self.header_parser.parsestr(raw_email)
            #print(email_message.is_multipart())
            payloads = email_message.get_payload()  # NOTE(review): unused
            # One sub-directory per message, named after the IMAP UID.
            save_folder = os.path.join(folder, email_id)
            print("Saving email received from: " + headers['From'])
            self.save(email_message, headers, save_folder)
    def save(self, msg, headers, folder):
        """Write a header summary plus any '.ksh'-guessed text parts to
        ``folder/_email.txt`` and each named attachment to its own file.
        """
        if not os.path.exists(folder):
            os.mkdir(folder)
        # Human-readable header block that prefixes the saved message text.
        email_text = "Subject: {e_subject}\nFrom: {e_from}\nDate: {e_date}\n\n".format(
            e_subject=headers['Subject'],
            e_from=headers['From'],
            e_date=headers['Date']
        )
        for part in msg.walk():
            # multipart/* are just containers
            if part.get_content_maintype() == 'multipart':
                continue
            # Applications should really sanitize the given filename so that an
            # email message can't be used to overwrite important files
            filename = part.get_filename()
            #print("Filename " + str(filename))
            if not filename:
                # Un-named part: keep it only when its MIME type guesses as
                # '.ksh' — presumably how plain-text bodies appear on this
                # system; TODO confirm.
                ext = mimetypes.guess_extension(part.get_content_type())
                #print(ext)
                if '.ksh' == ext:
                    email_text += part.get_payload(decode=True)
                    #print(part.get_payload(decode=True))
            else:
                # Named attachment: write the decoded payload alongside
                # _email.txt.
                fp = open(os.path.join(folder, filename), 'wb')
                fp.write(part.get_payload(decode=True).strip())
                fp.close()
        open(os.path.join(folder, '_email.txt'), 'wb').write(email_text)
if '__main__' == __name__:
    # Require the destination folder as the single command-line argument.
    if len(sys.argv) != 2:
        print("Usage:\n\n\t %s email_save_folder" % sys.argv[0])
        sys.exit()
    # Prompt interactively so the password never appears on the command line.
    password = getpass.getpass("Enter password for %s: " % email_id)
    M = Mailbox(email_id, password)
    M.select(assignments_folder)
    M.fetch_messages(folder=sys.argv[1])
|
binoculars/osf.io | addons/base/tests/models.py | Python | apache-2.0 | 24,742 | 0.001213 | import abc
import mock
import pytest
from addons.base.tests.utils import MockFolder
from django.utils import timezone
from framework.auth import Auth
from framework.exceptions import HTTPError
from nose.tools import (assert_equal, assert_false, assert_in, assert_is,
assert_is_none, assert_not_in, assert_raises,
assert_true)
from osf_tests.factories import ProjectFactory, UserFactory
from tests.utils import mock_auth
from addons.base import exceptions
from osf_tests.conftest import request_context
pytestmark = pytest.mark.django_db
class OAuthAddonModelTestSuiteMixinBase(object):
    """Shared base for the OAuth addon model test-suite mixins.

    Subclasses must provide the addon's short/full names and a factory for
    external accounts.
    """

    # Fixed a typo: the original spelled this ``___metaclass__`` (three
    # leading underscores) so it was never honored. Note that on Python 3
    # the ``__metaclass__`` attribute is inert either way; the
    # abstractproperty declarations below mainly serve as documentation.
    __metaclass__ = abc.ABCMeta

    @abc.abstractproperty
    def short_name(self):
        """str. The addon's short name (machine identifier)."""

    @abc.abstractproperty
    def full_name(self):
        """str. The addon's full display name."""

    @abc.abstractproperty
    def ExternalAccountFactory(self):
        """Factory class that produces ExternalAccount instances."""
class OAuthAddonUserSettingTestSuiteMixin(OAuthAddonModelTestSuiteMixinBase):
    """Test-suite mixin for OAuth addon *user* settings models: merging of
    users, and granting/verifying per-node OAuth access.
    """

    def setUp(self):
        """Create a project plus its creator, link an external account to the
        creator, and load the addon's user settings.
        """
        self.node = ProjectFactory()
        self.user = self.node.creator
        self.external_account = self.ExternalAccountFactory()

        self.user.external_accounts.add(self.external_account)
        self.user.save()
        self.user_settings = self.user.get_or_add_addon(self.short_name)

    def test_mergability(self):
        """User settings must advertise themselves as mergeable."""
        assert self.user_settings.can_be_merged

    def test_merge_user_settings(self):
        """Merging a user moves node-level OAuth grants and repoints the node
        settings at the surviving user's settings.
        """
        other_node = ProjectFactory()
        other_user = other_node.creator
        other_account = self.ExternalAccountFactory()
        other_user.external_accounts.add(other_account)
        other_user_settings = other_user.get_or_add_addon(self.short_name)
        other_node_settings = other_node.get_or_add_addon(self.short_name, auth=Auth(other_user))
        other_node_settings.set_auth(
            user=other_user,
            external_account=other_account
        )

        # Pre-merge sanity checks.
        assert other_node_settings.has_auth
        assert other_node._id not in self.user_settings.oauth_grants
        assert other_node_settings.user_settings == other_user_settings

        self.user.merge_user(other_user)
        self.user.save()

        other_node_settings.reload()
        self.user_settings.reload()
        assert other_node_settings.has_auth
        assert other_node._id in self.user_settings.oauth_grants
        assert other_node_settings.user_settings == self.user_settings

    def test_grant_oauth_access_no_metadata(self):
        """Granting access without metadata records an empty metadata dict."""
        self.user_settings.grant_oauth_access(
            node=self.node,
            external_account=self.external_account,
        )
        self.user_settings.save()

        assert self.user_settings.oauth_grants == {self.node._id: {self.external_account._id: {}}}

    def test_grant_oauth_access_metadata(self):
        """Granting access with metadata records that metadata verbatim."""
        self.user_settings.grant_oauth_access(
            node=self.node,
            external_account=self.external_account,
            metadata={'folder': 'fake_folder_id'}
        )
        self.user_settings.save()

        assert self.user_settings.oauth_grants == {
            self.node._id: {
                self.external_account._id: {'folder': 'fake_folder_id'}
            },
        }

    def test_verify_oauth_access_no_metadata(self):
        """Verification succeeds for the granted account, and fails for a
        different account.
        """
        self.user_settings.grant_oauth_access(
            node=self.node,
            external_account=self.external_account,
        )
        self.user_settings.save()

        assert_true(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=self.external_account
            )
        )
        assert_false(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=self.ExternalAccountFactory()
            )
        )

    def test_verify_oauth_access_metadata(self):
        """Verification succeeds only when the metadata matches the grant."""
        self.user_settings.grant_oauth_access(
            node=self.node,
            external_account=self.external_account,
            metadata={'folder': 'fake_folder_id'}
        )
        self.user_settings.save()

        assert_true(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=self.external_account,
                metadata={'folder': 'fake_folder_id'}
            )
        )
        assert_false(
            self.user_settings.verify_oauth_access(
                node=self.node,
                external_account=self.external_account,
                metadata={'folder': 'another_folder_id'}
            )
        )
class OAuthAddonNodeSettingsTestSuiteMixin(OAuthAddonModelTestSuiteMixinBase):
@pytest.yield_fixture(autouse=True)
def _request_context(self, app):
context = app.test_request_context(headers={
'Remote-Addr': '146.9.219.56',
'User-Agent': 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:0.9.4.1) Gecko/20020518 Netscape6/6.2.3'
})
context.push()
yield context
context.pop()
@abc.abstractproperty
def NodeSettingsFactory(self):
pass
@abc.abstractproperty
def NodeSettingsClass(self):
pass
@abc.abstractproperty
def UserSettingsFactory(self):
pass
def _node_settings_class_kwargs(self, node, user_settings):
return {
'user_settings': self.user_settings,
'folder_id': '1234567890',
'owner': self.node
}
def setUp(self):
super(OAuthAddonNodeSettingsTestSuiteMixin, self).setUp()
self.node = ProjectFactory()
self.user = self.node.creator
self.external_account = self.ExternalAccountFactory()
self.user.add_addon(self.short_name)
self.user.external_accounts.add(self.external_account)
self.user.save()
self.user_settings = self.user.get_addon(self.short_name)
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': '1234567890'}
)
self.user_settings.save()
self.node_settings = self.NodeSettingsFactory(
**self._node_settings_class_kwargs(self.node, self.user_settings)
)
self.node_settings.external_account = self.external_account
self.node_settings.save()
def tearDown(self):
super(OAuthAddonNodeSettingsTestSuiteMixin, self).tearDown()
self.user_settings.delete()
self.external_account.delete()
self.node.delete()
self.user.delete()
@pytest.mark.django_db
def test_configured_true(self):
assert_true(self.node_settings.has_auth)
assert_true(self.node_settings.complete)
assert_true(self.node_settings.configured)
def test_configured_false(self):
self.node_settings.clear_settings()
self.node_settings.save()
assert_false(self.node_settings.configured)
def test_complete_true(self):
assert_true(self.node_settings.has_auth)
assert_true(self.node_settings.complete)
def test_complete_has_auth_not_verified(self):
with mock_auth(self.user):
self.user_settings.revoke_oauth_access(self.external_account)
self.node_settings.reload()
assert_false(self.node_settings.has_auth)
assert_false(self.node_settings.complete)
assert_equal(
self.user_settings.oauth_grants,
{self.node._id: {}}
)
def test_revoke_remote_access_called(self):
with mock.patch.object(self.user_settings, 'revoke_remote_oauth_access') as mock_revoke:
with mock_auth(self.user):
self.user_settings.revoke_oauth_access(self.external_account)
assert_equal(mock_revoke.call_count, 1)
def test_revoke_remote_access_not_called(self):
user2 = UserFactory()
user2.external_accounts.add(self.external_account)
user2.save()
with mock.patch.object(self.user_settings, 'revoke_remote_oauth_access') as mock_revoke:
with mock_auth(self.user):
self.user_settings.revoke_oauth_access(self.external_account)
assert_equal(mock_revoke.call_count, 0)
de |
oseledets/ttpy | examples/test_eigb.py | Python | mit | 544 | 0.020221 | from __future__ import | print_function, absolute_import, division
import sys
sys.path.append('../')
import numpy as np
import tt
from tt.eigb import *
import time

""" This code computes many eigenvalues of the Laplacian operator """

d = 8  # quantics depth: each physical mode has 2**d points
f = 8  # number of physical dimensions
# QTT representation of the f-dimensional discrete Laplacian.
A = tt.qlaplace_dd([d] * f)
#A = (-1)*A
#A = tt.eye(2,d)
n = [2] * (d * f)
r = [8] * (d * f + 1)
r[0] = 1
r[d * f] = 8  # Number of eigenvalues sought
# Random TT tensor with the given mode sizes/ranks as the initial guess
# for the block eigensolver.
x = tt.rand(n, d * f, r)
#x = tt_ones(2,d)
t = time.time()
# Block eigensolver: computes r[-1] eigenpairs of A to tolerance 1e-6.
y, lam = eigb(A, x, 1e-6)
t1 = time.time()
print('Eigenvalues:', lam)
print('Time is:', t1-t)
OpenWinCon/OpenWinNet | web-gui/myvenv/lib/python3.4/site-packages/pip/commands/freeze.py | Python | apache-2.0 | 4,647 | 0.002798 | import re
import sys

import pip
from pip.req import InstallRequirement
from pip.log import logger
from pip.basecommand import Command
from pip.util import get_installed_distributions
import pkg_resources
class FreezeCommand(Command):
    """Output installed packages in requirements format."""
    name = 'freeze'
    usage = """
      %prog [options]"""
    summary = 'Output installed packages in requirements format.'

    def __init__(self, *args, **kw):
        super(FreezeCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirement',
            action='store',
            default=None,
            metavar='file',
            help="Use the order in the given requirements file and it's comments when generating output.")
        self.cmd_opts.add_option(
            '-f', '--find-links',
            dest='find_links',
            action='append',
            default=[],
            metavar='URL',
            help='URL for finding packages, which will be added to the output.')
        self.cmd_opts.add_option(
            '-l', '--local',
            dest='local',
            action='store_true',
            default=False,
            help='If in a virtualenv that has global access, do not output globally-installed packages.')
        self.parser.insert_option_group(0, self.cmd_opts)

    def setup_logging(self):
        # Keep stdout clean for the requirements output; logs go to stderr.
        logger.move_stdout_to_stderr()

    def run(self, options, args):
        """Write the frozen requirement list to stdout, optionally ordered
        by an existing requirements file (-r).
        """
        requirement = options.requirement
        find_links = options.find_links or []
        local_only = options.local
        ## FIXME: Obviously this should be settable:
        find_tags = False
        skip_match = None

        skip_regex = options.skip_requirements_regex
        if skip_regex:
            skip_match = re.compile(skip_regex)

        dependency_links = []

        f = sys.stdout

        # Collect dependency links declared by installed distributions and
        # by any explicitly supplied --find-links URLs.
        for dist in pkg_resources.working_set:
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))
        for link in find_links:
            if '#egg=' in link:
                dependency_links.append(link)
        for link in find_links:
            f.write('-f %s\n' % link)

        # Map of project name -> frozen requirement for everything installed.
        installations = {}
        for dist in get_installed_distributions(local_only=local_only):
            req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)
            installations[req.name] = req

        if requirement:
            # Echo the reference requirements file in order, substituting the
            # currently-installed version for each recognized requirement.
            # (The original leaked this file handle; use a context manager.)
            with open(requirement) as req_f:
                for line in req_f:
                    if not line.strip() or line.strip().startswith('#'):
                        f.write(line)
                        continue
                    if skip_match and skip_match.search(line):
                        f.write(line)
                        continue
                    elif line.startswith('-e') or line.startswith('--editable'):
                        if line.startswith('-e'):
                            line = line[2:].strip()
                        else:
                            line = line[len('--editable'):].strip().lstrip('=')
                        line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)
                    elif (line.startswith('-r') or line.startswith('--requirement')
                          or line.startswith('-Z') or line.startswith('--always-unzip')
                          or line.startswith('-f') or line.startswith('-i')
                          or line.startswith('--extra-index-url')
                          or line.startswith('--find-links')
                          or line.startswith('--index-url')):
                        f.write(line)
                        continue
                    else:
                        line_req = InstallRequirement.from_line(line)
                    if not line_req.name:
                        logger.notify("Skipping line because it's not clear what it would install: %s"
                                      % line.strip())
                        logger.notify("  (add #egg=PackageName to the URL to avoid this warning)")
                        continue
                    if line_req.name not in installations:
                        logger.warn("Requirement file contains %s, but that package is not installed"
                                    % line.strip())
                        continue
                    f.write(str(installations[line_req.name]))
                    del installations[line_req.name]

        # Anything installed but not mentioned in the reference file.
        f.write('## The following requirements were added by pip --freeze:\n')
        for installation in sorted(installations.values(), key=lambda x: x.name):
            f.write(str(installation))
|
chaos95/fluxxbot | pyGBot/Plugins/probability/Roll.py | Python | gpl-3.0 | 5,676 | 0.008633 | ##
## Roll - a plugin for pyGBot
## Copyright (C) 2008 Morgan Lokhorst-Blight, Alex Soborov
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
import random
import re
from pyGBot import log
from pyGBot.BasePlugin import BasePlugin
class Roll(BasePlugin):
__plugintype__ = "active"
def __init__(self, bot, options):
BasePlugin.__init__(self, bot, options)
self.activeGame = False
self.output = True
self.modbreak = re.compile('([\+\-\*\/%^].*)')
def rollDice(self, message):
param = message.partition("roll")[2].split()[0].rstrip(",.?").lower()
sections = self.modbreak.split(param, 1)[:2]
numdice, numsides = sections[0].split('d')
numdice = int(numdice)
if numdice > 75:
return None, None, None, None, 1
if numsides != 'f':
numsides = int(numsides)
if numsides > 10000:
return None, None, None, None, 2
if len(sections) > 1:
mod = sections[1].replace("^", "**")
else:
mod = ''
if numsides == 'f':
bottom = -1
top = 1
else:
bottom = 1
top = numsides
stack = []
for i in range(0, numdice):
stack.append(random.randint(bottom, top))
subtotal = sum(stack)
exec 'modtotal = subtotal %s' % mod
rolls = ', '.join(map(str,stack))
return rolls, subtotal, modtotal, mod, 0
def pubout(self, channel, message):
if self.output == True:
self.bot.pubout(channel, message)
# Event handlers for other users
def user_join(self, channel, username):
pass
def user_part(self, channel, username):
pass
def user_quit(self, username, reason=""):
pass
def user_nickchange(self, username, newname):
pass
# Event handlers for this bot
def bot_connect(self):
pass
def bot_join(self, channel):
pass
def bot_kicked(self, channel, kicker="", reason=""):
pass
def bot_disconnect(self):
pass
# Event handlers for incoming messages
def msg_channel(self, channel, user, message):
if message.lower().startswith(self.bot.nickname.lower()) and message.find(" roll ") != -1:
rolls, subtotal, modtotal, mod, status = self.rollDice(message)
if status == 0:
if mod != '':
mod = mod.replace("** | ", "^")
self.bot.pubout(channel, "Rolled %s for a subtotal of %d. With modifiers of %s, the total is %d" % (rolls, subtotal, mod, modtotal))
else:
self.bot.pubout(channel, "Rolled " + rolls + " for a total of %i." % modtotal)
elif st | atus == 1:
self.bot.pubout(channel, "That's too many dice! I won't roll more than 75 at once.")
elif status == 2:
self.bot.pubout(channel, "Why would you ever need dice with that many sides? I'm not rolling this.")
def msg_action(self, channel, user, message):
pass
def msg_private(self, user, message):
if message.lower().startswith("roll "):
rolls, subtotal, modtotal, mod, status = self.rollDice(message)
if status == 0:
if mod != '':
mod = mod.replace("**", "^")
self.bot.privout(user, "Rolled %s for a subtotal of %d. With modifiers of %s, the total is %d" % (rolls, subtotal, mod, modtotal))
else:
self.bot.privout(user, "Rolled " + rolls + " for a total of %i." % modtotal)
elif status == 1:
self.bot.privout(user, "That's too many dice! I won't roll more than 75 at once.")
elif status == 2:
self.bot.privout(user, "Why would you ever need dice with that many sides? I'm not rolling this.")
def msg_notice(self, user, message):
if message.lower().startswith("roll "):
rolls, subtotal, modtotal, mod, status = self.rollDice(message)
if status == 0:
if mod != '':
mod = mod.replace("**", "^")
self.bot.noteout(user, "Rolled %s for a subtotal of %d. With modifiers of %s, the total is %d" % (rolls, subtotal, mod, modtotal))
else:
self.bot.noteout(user, "Rolled " + rolls + " for a total of %i." % modtotal)
elif status == 1:
self.bot.noteout(user, "That's too many dice! I won't roll more than 75 at once.")
elif status == 2:
self.bot.noteout(user, "Why would you ever need dice with that many sides? I'm not rolling this.")
def channel_names(self, channel, nameslist):
pass
|
wjlei1990/shakemovie_pyproc | src/source.py | Python | gpl-2.0 | 11,628 | 0.003354 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Source and Receiver classes of Instaseis.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2014
Martin van Driel (Martin@vanDriel.de), 2014
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lgpl.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import numpy as np
import obspy
from obspy.core.util.geodetics import FlinnEngdahl
from obspy.signal.filter import lowpass
import obspy.xseed
import os
from scipy import interp
import warnings
from obspy import readEvents
# Default shear modulus (rigidity), 32 GPa in Pa — presumably a fallback used
# by moment computations elsewhere in this module; TODO confirm callers.
DEFAULT_MU = 32e9
class CMTSource(object):
"""
Class to handle a seismic moment tensor source including a source time
function.
"""
    def __init__(self, origin_time=obspy.UTCDateTime(0),
                 pde_latitude=0.0, pde_longitude=0.0, mb=0.0, ms=0.0, pde_depth_in_m=None,
                 region_tag=None, eventname=None, cmt_time=0.0, half_duration=0.0,
                 latitude=0.0, longitude=0.0, depth_in_m=None,
                 m_rr=0.0, m_tt=0.0, m_pp=0.0, m_rt=0.0, m_rp=0.0, m_tp=0.0):
        """
        :param origin_time: PDE origin time of the event. This will be the
            time of the first sample in the final seismogram. Be careful to
            adjust it for any time shift or STF (de)convolution effects.
        :param pde_latitude: PDE hypocenter latitude in degree
        :param pde_longitude: PDE hypocenter longitude in degree
        :param mb: body-wave magnitude
        :param ms: surface-wave magnitude
        :param pde_depth_in_m: PDE hypocenter depth in m
        :param region_tag: geographical region name of the event
        :param eventname: event identifier string
        :param cmt_time: centroid time (origin time plus time shift)
        :param half_duration: half duration of the source time function in s
        :param latitude: centroid latitude of the source in degree
        :param longitude: centroid longitude of the source in degree
        :param depth_in_m: centroid source depth in m
        :param m_rr: moment tensor components in r, theta, phi in Nm
        :param m_tt: moment tensor components in r, theta, phi in Nm
        :param m_pp: moment tensor components in r, theta, phi in Nm
        :param m_rt: moment tensor components in r, theta, phi in Nm
        :param m_rp: moment tensor components in r, theta, phi in Nm
        :param m_tp: moment tensor components in r, theta, phi in Nm
        """
        self.origin_time = origin_time
        self.pde_latitude = pde_latitude
        self.pde_longitude = pde_longitude
        self.pde_depth_in_m = pde_depth_in_m
        self.mb = mb
        self.ms = ms
        self.region_tag = region_tag
        self.eventname = eventname
        self.cmt_time = cmt_time
        self.half_duration = half_duration
        self.latitude = latitude
        self.longitude = longitude
        self.depth_in_m = depth_in_m
        self.m_rr = m_rr
        self.m_tt = m_tt
        self.m_pp = m_pp
        self.m_rt = m_rt
        self.m_rp = m_rp
        self.m_tp = m_tp
    @classmethod
    def from_CMTSOLUTION_file(self, filename):
        """
        Initialize a source object from a CMTSOLUTION file.

        The lines of a CMTSOLUTION file are read strictly in order: the PDE
        header line first, then one "key: value" line per field.

        :param filename: path to the CMTSOLUTION file
        """
        with open(filename, "rt") as f:
            # Header line: "PDE yyyy mm dd HH MM SS.ss lat lon depth mb ms region"
            line = f.readline()
            origin_time = line[4:].strip().split()[:6]
            values = list(map(int, origin_time[:-1])) + \
                [float(origin_time[-1])]
            try:
                origin_time = obspy.UTCDateTime(*values)
            except (TypeError, ValueError):
                warnings.warn("Could not determine origin time from line: %s"
                              % line)
                origin_time = obspy.UTCDateTime(0)
            # Remaining header tokens: lat, lon, depth(km), mb, ms, region tag.
            otherinfo = line[4:].strip().split()[6:]
            pde_lat = float(otherinfo[0])
            pde_lon = float(otherinfo[1])
            pde_depth_in_m = float(otherinfo[2]) * 1e3
            mb = float(otherinfo[3])
            ms = float(otherinfo[4])
            region_tag = ' '.join(otherinfo[5:])
            # Each subsequent line is "label: value"; only the value is kept.
            eventname = f.readline().strip().split()[-1]
            time_shift = float(f.readline().strip().split()[-1])
            cmt_time = origin_time + time_shift
            half_duration = float(f.readline().strip().split()[-1])
            latitude = float(f.readline().strip().split()[-1])
            longitude = float(f.readline().strip().split()[-1])
            depth_in_m = float(f.readline().strip().split()[-1]) * 1e3
            # Moment tensor components, kept in the file's native unit
            # (dyne*cm in standard CMTSOLUTION files — the disabled "/ 1e7"
            # would have converted to N*m; confirm downstream expectations).
            m_rr = float(f.readline().strip().split()[-1]) #/ 1e7
            m_tt = float(f.readline().strip().split()[-1]) #/ 1e7
            m_pp = float(f.readline().strip().split()[-1]) #/ 1e7
            m_rt = float(f.readline().strip().split()[-1]) #/ 1e7
            m_rp = float(f.readline().strip().split()[-1]) #/ 1e7
            m_tp = float(f.readline().strip().split()[-1]) #/ 1e7
        return self(origin_time=origin_time,
                    pde_latitude=pde_lat, pde_longitude=pde_lon, mb=mb, ms=ms, pde_depth_in_m=pde_depth_in_m,
                    region_tag=region_tag, eventname=eventname, cmt_time=cmt_time, half_duration=half_duration,
                    latitude=latitude, longitude=longitude, depth_in_m=depth_in_m,
                    m_rr=m_rr, m_tt=m_tt, m_pp=m_pp, m_rt=m_rt, m_rp=m_rp, m_tp=m_tp)
@classmethod
def from_quakeml_file(self, filename):
"""
Initizliaze a source object from a quakeml file
:param filename: path to a quakeml file
"""
from obspy import readEvents
cat = readEvents(filename)
event = cat[0]
cmtsolution = event.preferred_origin()
pdesolution = event.origins[0]
origin_time = pdesolution.time
pde_lat = pdesolution.latitude
pde_lon = pdesolution.longitude
pde_depth_in_m = pdesolution.depth
for mag in event.magnitudes:
if mag.magnitude_type == "mb":
mb = mag.mag
| elif mag.magnitude_type == "MS":
ms = mag.mag
region_tag = event.event_descriptions[0].text
for descrip in event.event_descriptions:
if descrip.type == "earthquake name":
eventname = descrip.text
| eventname = self.adjust_eventname(eventname)
cmt_time = cmtsolution.time
focal_mechanism = event.focal_mechanisms[0]
half_duration = focal_mechanism.moment_tensor.source_time_function.duration/2.0
latitude = cmtsolution.latitude
longitude = cmtsolution.longitude
depth_in_m = cmtsolution.depth
tensor = focal_mechanism.moment_tensor.tensor
m_rr = tensor.m_rr * 1e7
m_tt = tensor.m_tt * 1e7
m_pp = tensor.m_pp * 1e7
m_rt = tensor.m_rt * 1e7
m_rp = tensor.m_rp * 1e7
m_tp = tensor.m_tp * 1e7
return self(origin_time=origin_time,
pde_latitude=pde_lat, pde_longitude=pde_lon, mb=mb, ms=ms, pde_depth_in_m=pde_depth_in_m,
region_tag=region_tag, eventname=eventname, cmt_time=cmt_time, half_duration=half_duration,
latitude=latitude, longitude=longitude, depth_in_m=depth_in_m,
m_rr=m_rr, m_tt=m_tt, m_pp=m_pp, m_rt=m_rt, m_rp=m_rp, m_tp=m_tp)
def write_CMTSOLUTION_file(self, filename):
"""
Initialize a source object from a CMTSOLUTION file.
:param filename: path to the CMTSOLUTION file
"""
time_shift = self.cmt_time - self.origin_time
with open(filename, "w") as f:
# Reconstruct the first line as well as possible. All
# hypocentral information is missing.
f.write(' PDE %4i %2i %2i %2i %2i %5.2f %8.4f %9.4f %5.1f %.1f %.1f'
' %s\n' % (
self.origin_time.year,
self.origin_time.month,
self.origin_time.day,
self.origin_time.hour,
self.origin_time.minute,
self.origin_time.second +
self.origin_time.microsecond / 1E6,
self.pde_latitude,
self.pde_longitude,
|
lmazuel/azure-sdk-for-python | azure-mgmt-storage/azure/mgmt/storage/v2016_01_01/models/storage_account_regenerate_key_parameters.py | Python | mit | 957 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageAccountRege | nerateKeyParameters(Model):
"""StorageAccountRegenerateKeyParameters.
:param key_name:
:type key_name: str
"""
_validation = {
'key_name': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
}
def __init__(self, key_name):
super(StorageAccountRegenerateKeyParameters, self).__ | init__()
self.key_name = key_name
|
sbsdev/daisyproducer | daisyproducer/documents/templatetags/verbose_name.py | Python | agpl-3.0 | 214 | 0.014019 | from django import template
from django.utils.text import capfirst
register = | template.Library()
@register.filter
def verbose_name(obj, arg):
return capfirst | (obj._meta.get_field(arg).verbose_name)
|
hpk42/pluggy | docs/conf.py | Python | mit | 2,375 | 0 | import sys
if sys.version_info >= (3, 8):
from importlib import metadata
else:
import importlib_metadata | as metadata
extensions = [
"sphinx.ex | t.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pluggy"
copyright = "2016, Holger Krekel"
author = "Holger Krekel"
release = metadata.version(project)
# The short X.Y version.
version = ".".join(release.split(".")[:2])
language = None
pygments_style = "sphinx"
# html_logo = "_static/img/plug.png"
html_theme = "alabaster"
html_theme_options = {
"logo": "img/plug.png",
"description": "The pytest plugin system",
"github_user": "pytest-dev",
"github_repo": "pluggy",
"github_button": "true",
"github_banner": "true",
"github_type": "star",
"badge_branch": "master",
"page_width": "1080px",
"fixed_sidebar": "false",
}
html_sidebars = {
"**": ["about.html", "localtoc.html", "relations.html", "searchbox.html"]
}
html_static_path = ["_static"]
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pluggy", "pluggy Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pluggy",
"pluggy Documentation",
author,
"pluggy",
"One line description of project.",
"Miscellaneous",
)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pytest": ("https://docs.pytest.org/en/latest", None),
"setuptools": ("https://setuptools.readthedocs.io/en/latest", None),
"tox": ("https://tox.readthedocs.io/en/latest", None),
"devpi": ("https://devpi.net/docs/devpi/devpi/stable/+doc/", None),
"kedro": ("https://kedro.readthedocs.io/en/latest/", None),
}
|
anthraxx/diffoscope | diffoscope/presenters/html/templates.py | Python | gpl-3.0 | 5,651 | 0.001062 | # -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2016 Chris Lamb <lamby@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
HEADER = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta http-equiv="x-ua-compatible" content="IE=edge">
<meta name="referrer" content="no-referrer" />
<meta name="generator" content="diffoscope" />
<link rel="icon" type="image/png" href="data:image/png;base64,%(favicon)s" />
<title>%(title)s</title>
<style type="text/css">
body.diffoscope {
background: white;
color: black;
}
.diffoscope .footer {
font-size: small;
}
.diffoscope .difference {
border: outset #888 1px;
background: #E8E8E8;
background: rgba(0,0,0,.1);
padding: 0.5em;
margin: 0.5em 0;
}
.diffoscope .difference table {
table-layout: fixed;
width: 100%%;
border: 0;
}
.diffoscope .difference th,
.diffoscope .difference td {
border: 0;
}
.diffoscope table.diff {
border: 0;
border-collapse:collapse;
font-size:0.75em;
font-family: 'Lucida Console', monospace;
}
.diffoscope table.diff tr:hover td {
background: #FFFF00;
}
.diffoscope .line {
color:#8080a0
}
.diffoscope th {
background: blac | k;
color: white
}
.diffoscope .diffunmodified td {
background: #D0D0E0
}
.diffoscope .diffhunk td {
background: #A0A0A0
}
.diffoscope .diffadded td {
background: #CCFFCC
}
.diffoscope .diffdeleted td {
background: #FFCCCC
}
.diffoscope .diffchanged td {
background: #FFFFA0
}
.diffoscope ins, del {
background: #E0C880;
text-decoration: none
}
.diffoscope .diffponct {
color: #B08080
}
.diff | oscope .comment {
font-style: italic;
}
.diffoscope .source {
font-weight: bold;
}
.diffoscope .error {
border: solid black 1px;
background: red;
color: white;
padding: 0.2em;
}
.diffoscope .anchor {
margin-left: 0.5em;
font-size: 80%%;
color: #333;
text-decoration: none;
display: none;
}
.diffoscope .diffheader:hover .anchor {
display: inline;
}
.diffoscope table.diff tr.ondemand td {
background: #f99;
text-align: center;
padding: 0.5em 0;
}
.diffoscope table.diff tr.ondemand:hover td {
background: #faa;
cursor: pointer;
}
.diffoscope .diffcontrol {
float: left;
margin-right: 0.3em;
cursor: pointer;
display: none; /* currently, only available in html-dir output where jquery is enabled */
}
.diffoscope .diffcontrol-double {
line-height: 200%%;
}
.diffoscope .colines {
width: 3em;
}
.diffoscope .coldiff {
width: 99%%;
}
</style>
%(css_link)s
</head>
<body class="diffoscope">
"""
FOOTER = """
<div class="footer">Generated by <a href="https://diffoscope.org" rel="noopener noreferrer" target="_blank">diffoscope</a> %(version)s</div>
</body>
</html>
"""
SCRIPTS = """
<script src="%(jquery_url)s"></script>
<script type="text/javascript">
$(function() {
var load_cont = function() {
var a = $(this).find("a");
var textparts = /^(.*)\((\d+) pieces?(.*)\)$/.exec(a.text());
var numleft = Number.parseInt(textparts[2]) - 1;
var noun = numleft == 1 ? "piece" : "pieces";
var newtext = textparts[1] + "(" + numleft + " " + noun + textparts[3] + ")";
var filename = a.attr('href');
var td = a.parent();
td.text('... loading ...');
td.parent().load(filename + " tr", function() {
// https://stackoverflow.com/a/8452751/946226
var elems = $(this).children(':first').unwrap();
// set this behaviour for the next link too
var td = elems.parent().find(".ondemand td");
td.find("a").text(newtext);
td.on('click', load_cont);
});
return false;
};
$(".ondemand td").on('click', load_cont);
var diffcontrols = $(".diffcontrol");
diffcontrols.on('click', function(evt) {
var control = $(this);
var target = control.parent().siblings('table.diff, div.difference');
var orig = target;
if (evt.shiftKey) {
var parent = control.parent().parent();
control = parent.find('.diffcontrol');
target = parent.find('table.diff, div.difference');
}
if (orig.is(":visible")) {
target.hide();
control.text("[+]");
} else {
target.show();
control.text("[−]");
}
});
diffcontrols.attr('title','shift-click to show/hide all children too.');
diffcontrols.show();
});
</script>
"""
UD_TABLE_HEADER = u"""<table class="diff">
<colgroup><col class="colines"/><col class="coldiff"/>
<col class="colines"/><col class="coldiff"/></colgroup>
"""
UD_TABLE_FOOTER = u"""<tr class="ondemand"><td colspan="4">
... <a href="%(filename)s">%(text)s</a> ...
</td></tr>
</table>
"""
|
ptesarik/crash-python | crash/cache/syscache.py | Python | gpl-2.0 | 6,029 | 0.003981 | # -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import round
import gdb
import re
import zlib
import sys
from datetime import timedelta
if sys.version_info.major >= 3:
long = int
from crash.exceptions import DelayedAttributeError
from crash.cache import CrashCache
from crash.util import array_size
from crash.infra import export
from crash.infra.lookup import get_delayed_lookup
class CrashUtsnameCache(CrashCache):
__symvals__ = [ 'init_uts_ns' ]
def load_utsname(self):
self.utsname = self.init_uts_ns['name']
return self.utsname
def init_utsname_cache(self):
d = {}
for field in self.utsname.type.fields():
val = self.utsname[field.name].string()
d[field.name] = val
self.utsname_cache = d
return self.utsname_cache
utsname_fields = [ 'sysname', 'nodename', 'release',
'version', 'machine', 'domainname' ]
def __getattr__(self, name):
if name == 'utsname_cache':
return self.init_utsname_cache()
elif name == 'utsname':
return self.load_utsname()
if name in self.utsname_fields:
return self.utsname_cache[name]
return getattr(self.__class__, name)
class CrashConfigCache(CrashCache):
__types__ = [ 'char *' ]
__symvals__ = [ 'kernel_config_data' ]
def __getattr__(self, name):
if name == 'config_buffer':
return self.decompress_config_buffer()
elif name == 'ikconfig_cache':
return self._parse_config()
return getattr(self.__class__, name)
@staticmethod
def read_buf(address, size):
return str(gdb.selected_inferior().read_memory(address, size))
def decompress_config_buffer(self):
MAGIC_START = 'IKCFG_ST'
MAGIC_END = 'IKCFG_ED'
# Must cast it to char * to do the pointer arithmetic correctly
data_addr = self.kernel_config_data.address.cast(self.char_p_type)
data_len = self.kernel_config_data.type.sizeof
buf_len = len(MAGIC_START)
buf = self.read_buf(data_addr, buf_len)
if buf != MAGIC_START:
raise IOError("Missing MAGIC_START in kernel_config_data.")
buf_len = len(MAGIC_END)
buf = self.read_buf(data_addr + data_len - buf_len - 1, buf_len)
if buf != MAGIC_END:
raise IOError("Missing MAGIC_END in kernel_config_data.")
# Read the compressed data
buf_len = data_len - len(MAGIC_START) - len(MAGIC_END)
buf = self.read_buf(data_addr + len(MAGIC_START), buf_len)
self.config_buffer = zlib.decompress(buf, 16 + zlib.MAX_WBITS)
return self.config_buffer
def __str__(self):
return self.config_buffer
def _parse_config(self):
self.ikconfig_cache = {}
for line in self.config_buffer.splitlines():
# bin comments
line = re.sub("#.*$", "", line).strip()
if not line:
continue
m = re.match("CONFIG_([^=]*)=(.*)", line)
if m:
self.ikconfig_cache[m.group(1)] = m.group(2)
return self.ikconfig_cache
def __getitem__(self, name):
return self.ikconfig_cache[name]
class CrashKernelCache(CrashCache):
__symvals__ = [ 'avenrun' ]
__symbol_callbacks__ = [
( 'jiffies', 'setup_jiffies' ),
( 'jiffies_64', 'setup_jiffies' ) ]
__delayed_values__ = [ 'jiffies' ]
jiffies_ready = False
adjust_jiffies = False
def __init__(self, config):
CrashCache.__init__(self)
self.config = config
def __getattr__(self, name):
if name == 'hz':
self.hz = long(self.config['HZ'])
return self.hz
elif name == 'uptime':
return self.get_uptime()
elif name == 'loadavg':
return self.get_loadavg()
return getattr(self.__class__, name)
@staticmethod
def calculate_loadavg(metric):
# The kernel needs to do fixed point trickery to calculate
# a floating point average. We can just return a float.
return round(long(metric) / (1 << 11), 2)
@staticmethod
def format_loadavg(metrics):
out = []
for metric in metrics:
out.append(str(metric))
return " ".join(out)
def get_loadavg_values(self):
metrics = []
for index in range(0, array_size(self.avenrun)):
metrics.append(self.calculate_loadavg(self.avenrun[index]))
return metrics
def get_loadavg(self):
try:
metrics = self.get_lo | adavg_values()
self.loadavg = self.format_loadavg(metrics)
return self.loadavg
except DelayedAttributeError:
return "Unknown"
@classmethod
def setup | _jiffies(cls, symbol):
if cls.jiffies_ready:
return
jiffies_sym = gdb.lookup_global_symbol('jiffies_64')
if jiffies_sym:
try:
jiffies = long(jiffies_sym.value())
except gdb.MemoryError:
return False
cls.adjust_jiffies = True
else:
jiffies = long(gdb.lookup_global_symbol('jiffies').value())
cls.adjust_jiffies = False
delayed = get_delayed_lookup(cls, 'jiffies').callback(jiffies)
def adjusted_jiffies(self):
if self.adjust_jiffies:
return self.jiffies -(long(0x100000000) - 300 * self.hz)
else:
return self.jiffies
def get_uptime(self):
self.uptime = timedelta(seconds=self.adjusted_jiffies() // self.hz)
return self.uptime
@export
def jiffies_to_msec(self, jiffies):
return 1000 // self.hz * jiffies
utsname = CrashUtsnameCache()
config = CrashConfigCache()
kernel = CrashKernelCache(config)
|
wackerly/faucet | faucet/valve_acl.py | Python | apache-2.0 | 6,809 | 0.000587 | """Compose ACLs on ports."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from faucet import valve_of
from faucet.conf import InvalidConfigError
def push_vlan(vlan_vid):
"""Push a VLAN tag with optional selection of eth type."""
vid = vlan_vid
vlan_eth_type = None
if isinstance(vlan_vid, dict):
vid = vlan_vid['vid']
if 'eth_type' in vlan_vid:
vlan_eth_type = vlan_vid['eth_type']
if vlan_eth_type is None:
return valve_of.push_vlan_act(vid)
return valve_of.push_vlan_act(vid, eth_type=vlan_eth_type)
def rewrite_vlan(output_dict):
"""Implement actions to rewrite VLAN headers."""
vlan_actions = []
if 'pop_vlans' in output_dict:
for _ in range(output_dict['pop_vlans']):
vlan_actions.append(valve_of.pop_vlan())
# if vlan tag is specified, push it.
if 'vlan_vid' in output_dict:
vlan_actions.extend(push_vlan(output_dict['vlan_vid']))
# swap existing VID
elif 'swap_vid' in output_dict:
vlan_actions.append(
valve_of.set_vlan_vid(output_dict['swap_vid']))
# or, if a list, push them all (all with type Q).
elif 'vlan_vids' in output_dict:
for vlan_vid in output_dict['vlan_vids']:
vlan_actions.extend(push_vlan(vlan_vid))
return vlan_actions
def build_output_actions(output_dict):
"""Implement actions to alter packet/output."""
output_actions = []
output_port = None
ofmsgs = []
# rewrite any VLAN headers first always
vlan_actions = rewrite_vlan(output_dict)
if vlan_actions:
output_actions.extend(vlan_actions)
if 'set_fields' in output_dict:
for set_fields in output_dict['set_fields']:
output_actions.append(valve_of.set_field(**set_fields))
if 'port' in output_dict:
output_port = output_dict['port']
output_actions.append(valve_of.output_port(output_port))
if 'ports' in output_dict:
for output_port in output_dict['ports']:
output_actions.append(valve_of.output_port(output_port))
if 'failover' in output_dict:
failover = output_dict['failover']
group_id = failover['group_id']
buckets = []
for port in failover['ports']:
buckets.append(valve_of.bucket(
watch_port=port, actions=[valve_of.output_port(port)]))
ofmsgs.append(valve_of.groupdel(group_id=group_id))
ofmsgs.append(valve_of.groupadd_ff(group_id=group_id, buckets=buckets))
output_actions.append(valve_of.group_act(group_id=group_id))
return (output_port, output_actions, ofmsgs)
# TODO: change | this, maybe this can be rewritten easily
# possibly replace with a class for ACLs
def build_acl_entry(rule_conf, meters,
acl_allow_inst, acl_force_port_vlan_inst,
port_num=None, vlan_vid=None):
acl_inst = []
acl_act = []
acl_match_dict = {}
acl_ofmsgs = []
acl_cookie = None
allow_inst = acl_allow_inst
for attrib, attrib_value in list(rule_co | nf.items()):
if attrib == 'in_port':
continue
if attrib == 'cookie':
acl_cookie = attrib_value
continue
if attrib == 'description':
continue
if attrib == 'actions':
allow = False
allow_specified = False
if 'allow' in attrib_value:
allow_specified = True
if attrib_value['allow'] == 1:
allow = True
if 'force_port_vlan' in attrib_value:
if attrib_value['force_port_vlan'] == 1:
allow_inst = acl_force_port_vlan_inst
if 'meter' in attrib_value:
meter_name = attrib_value['meter']
acl_inst.append(valve_of.apply_meter(meters[meter_name].meter_id))
if 'mirror' in attrib_value:
port_no = attrib_value['mirror']
acl_act.append(valve_of.output_port(port_no))
if not allow_specified:
allow = True
if 'output' in attrib_value:
output_port, output_actions, output_ofmsgs = build_output_actions(
attrib_value['output'])
acl_act.extend(output_actions)
acl_ofmsgs.extend(output_ofmsgs)
# if port specified, output packet now and exit pipeline.
if output_port is not None:
continue
if allow:
acl_inst.append(allow_inst)
else:
acl_match_dict[attrib] = attrib_value
if port_num is not None:
acl_match_dict['in_port'] = port_num
if vlan_vid is not None:
acl_match_dict['vlan_vid'] = valve_of.vid_present(vlan_vid)
try:
acl_match = valve_of.match_from_dict(acl_match_dict)
except TypeError:
raise InvalidConfigError('invalid type in ACL')
if acl_act:
acl_inst.append(valve_of.apply_actions(acl_act))
return (acl_match, acl_inst, acl_cookie, acl_ofmsgs)
def build_acl_ofmsgs(acls, acl_table,
acl_allow_inst, acl_force_port_vlan_inst,
highest_priority, meters,
exact_match, port_num=None, vlan_vid=None):
ofmsgs = []
acl_rule_priority = highest_priority
for acl in acls:
for rule_conf in acl.rules:
acl_match, acl_inst, acl_cookie, acl_ofmsgs = build_acl_entry(
rule_conf, meters,
acl_allow_inst, acl_force_port_vlan_inst,
port_num, vlan_vid)
ofmsgs.extend(acl_ofmsgs)
if exact_match:
flowmod = acl_table.flowmod(
acl_match, priority=highest_priority, inst=acl_inst, cookie=acl_cookie)
else:
flowmod = acl_table.flowmod(
acl_match, priority=acl_rule_priority, inst=acl_inst, cookie=acl_cookie)
ofmsgs.append(flowmod)
acl_rule_priority -= 1
return ofmsgs
|
redhataccess/redhat-support-tool | src/redhat_support_tool/plugins/list_cases.py | Python | apache-2.0 | 17,874 | 0.001231 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from optparse import Option
from collections import deque
from redhat_support_lib.infrastructure.errors import RequestError, \
ConnectionError
from redhat_support_lib.infrastructure import contextmanager
from redhat_support_tool.helpers.confighelper import _
from redhat_support_tool.helpers.confighelper import EmptyValueError
from redhat_support_tool.plugins import InteractivePlugin, ObjectDisplayOption
from redhat_support_tool.helpers.constants import Constants
from redhat_support_tool.helpers.launchhelper import LaunchHelper
from redhat_support_tool.plugins.get_case import GetCase
import pydoc
import redhat_support_tool.helpers.common as common
import redhat_support_tool.helpers.apihelper as apihelper
import redhat_support_tool.helpers.confighelper as confighelper
import logging
__author__ = 'Keith Robertson <kroberts@redhat.com>'
logger = logging.getLogger("redhat_support_tool.plugins.list_cases")
class ListCases(InteractivePlugin):
plugin_name = 'listcases'
ALL = _("Display all cases")
partial_entries = _('%s of %s cases displayed. Type \'m\' to see more.')
end_of_entries = _('No more cases to display')
_submenu_opts = None
_sections = None
casesAry = None
# Help should not print the option list
help_is_options = False
# Record the last offset value used with the API, and the maximum results
# we should display for one search query.
_nextOffset = 0
_MAX_OFFSET = confighelper.get_config_helper().get(option='max_results')
_MAX_OFFSET = 1500 if not _MAX_OFFSET else int(_MAX_OFFSET)
_limit = 50 if _MAX_OFFSET >= 50 else _MAX_OFFSET
_caseGroupNumbers = None
# for displaying cases owned by an associate as per SFDC
_associateSSOName = None
_view = None
@classmethod
def get_usage(cls):
'''
The usage statement that will be printed by OptionParser.
Example:
- %prog -c CASENUMBER [options] <comment text here>
Important: %prog is a OptionParser built-in. Use it!
'''
return _('%prog')
@classmethod
def get_desc(cls):
'''
The description statement that will be printed by OptionParser.
Example:
- 'Use the \'%s\' command to add a comment to a case.'\
% cls.plugin_name
'''
return _('Use the \'%s\' command to list your open support cases.\n'
'- For Red Hat employees it lists open cases in your queue.\n'
'- For other users it lists open cases in your account.\n'
% cls.plugin_name)
@classmethod
def get_epilog(cls):
'''
The epilog string that will be printed by OptionParser. Usually
used to print an example of how to use the program.
Example:
Examples:
- %s -c 12345678 Lorem ipsum dolor sit amet, consectetur adipisicing
- %s -c 12345678
'''
return _('Example:\n'
' - %s\n'
' - %s -g groupname -c -s status -a\n'
' - %s -o ownerSSOName -s severity\n'
' - %s -o all') % (cls.plugin_name,
cls.plugin_name,
cls.plugin_name,
cls.plugin_name)
@classmethod
def get_options(cls):
'''
Subclasses that need command line options should override this method
and return an array of optparse.Option(s) to be used by the
OptionParser.
Example:
return [Option("-f", "--file", action="store",
dest="filename", help='Some file'),
Option("-c", "--case",
action="store", dest="casenumber",
help='A case')]
Would produce the following:
Command (? for help): help mycommand
Usage: mycommand [options]
Use the 'mycommand' command to find a knowledge base solution by ID
Options:
-h, --help show this help message and exit
-f, --file Some file
-c, --case A case
Example:
- mycommand -c 12345 -f abc.txt
'''
return [Option('-c', '--includeclosed', dest='includeclosed',
action='store_true',
help=_('Show closed cases. (optional)'), default=False),
Option('-o', '--owner', dest='owner',
help=_('For Red Hat employees only. Show cases '
'for another Red Hat employee portal login ID.'
' Specify -o ALL to show cases in the Red Hat '
'account instead of your case queue. (optional)'
), default=None),
Option('-g', '--casegroup', dest='casegroup',
help=_('Show cases belonging to a particular case group'
' in your account. (optional) Note, use the '
'\'listcasegroups\' command to see the case '
'groups in your account.'), default=None),
#Option('-k', '--keyword', dest='keyword',
# help=_('Only show cases with the given keyword in '
# 'their title. (optional)'), default=None),
Option('-u', '--ungrouped', dest='ungrouped',
action='store_true',
help=_('Include ungrouped cases in results. When this '
'is set then -o owner options will be ignored.'
'(optional)'), default=False),
Option('-s', '--sortby', dest='sortfield',
help=_("Sort cases by a particular field. Available "
"fields to sort by are: 'caseNumber' (default), "
"'createdDate', 'lastModifiedDate', 'severity', "
"'status'. (optional)"), default='caseNumber'),
Option('-a', '--ascending', dest='sortorder',
action='store_const', const='ASC',
help=_('Sort results in ascending order. Default is '
'to sort in descending order (optional)'),
| default='DESC')]
def _check_case_group(self):
if self._options['casegroup']:
valid_groups = []
given_groupAry = str(self._options['casegroup']).split(',')
real_gro | upAry = common.get_groups()
for i in given_groupAry:
match = False
for j in real_groupAry:
if i.lower() == j.get_name().lower() or \
i == str(j.get_number()):
valid_groups.append(j.get_number())
match = True
if(not match):
msg = _("Unable to find case group %s" % i)
print msg
raise Exception(msg)
if len(valid_groups) > 0:
self._caseGroupNumbers = valid_groups
logger.log(logging.INFO,
'Casegroup(%s) casegroupnumber(%s)' % (
given_groupAry,
self._caseGroupNumbers))
def _check_owner(self):
# Firstly, determine for whom listcases is being run and if they're a
# Red Ha |
ActiveState/code | recipes/Python/173071_ReseekFile/recipe-173071.py | Python | mit | 5,077 | 0.003151 | # Written in 2003 by Andrew Dalke, Dalke Scientific Software, LLC.
# This software has been released to the public domain. No
# copyright is asserted.
from cStringIO import StringIO
class ReseekFile:
"""wrap a file handle to allow seeks back to the beginning
Takes a file handle in the constructor.
See the module docstring for more documentation.
"""
def __init__(self, file):
self.file = file
self.buffer_file = StringIO()
self.at_beginning = 1
try:
self.beginning = file.tell()
except (IOError, AttributeError):
self.beginning = 0
self._use_buffer = 1
def seek(self, offset, whence = 0):
"""offset, whence = 0
Seek to a given byte position. Only supports whence == 0
and offset == the initial value of ReseekFile.tell() (which
is usually 0, but not always.)
"""
if whence != 0:
raise TypeError("Unexpected whence value of %s; expecting 0" % \
(whence,))
if offset != self.beginning:
raise TypeError("Unexpected offset value of %r; expecting '%s'" % \
(offset, self.beginning))
self.buffer_file.seek(0)
self.at_beginning = 1
def tell(self):
"""the current position of the file
The initial position may not be 0 if the underlying input
file supports tell and it not at position 0.
"""
if not self.at_beginning:
raise TypeError("ReseekFile cannot tell except at the beginning of file")
return self.beginning
def _read(self, size):
if size < 0:
y = self.file.read()
z = self.buffer_file.read() + y
if self._use_buffer:
self.buffer_file.write(y)
return z
if size == 0:
return ""
x = self.buffer_file.read(size)
if len(x) < size:
y = self.file.read(size - len(x))
if self._use_buffer:
self.buffer_file.write(y)
return x + y
return x
def read(self, size = -1):
"""read up to 'size' bytes from the file
Default is -1, which means to read to end of file.
"""
x = self._read(size)
if self.at_beginning and x:
self.at_beginning = 0
self._check_no_buffer()
return x
def readline(self):
"""read a line from the file"""
# Can we get it out of the buffer_file?
s = self.buffer_file.readline()
if s[-1:] == "\n":
return s
# No, so now we read a line from the input file
t = self.file.readline()
# Append the new data to the buffer, if still buffering
if self._use_buffer:
self.buffer_file.write(t)
self._check_no_buffer()
return s + t
def readlines(self):
"""read all remaining lines from the file"""
s = self.read()
lines = []
i, j = 0, s.find("\n")
while j > -1:
lines.append(s[i:j+1])
i = j+1
j = s.find("\n", i)
if i < len(s):
# Only get here if the last line doesn't have a newline
lines.append(s[i:])
return lines
def _check_no_buffer(self):
# If 'nobuffer' called and finished with the buffer file
# then get rid of the buffer and redirect everythin | g to
# the original input file.
if self._use_buffer == 0 and self.buffer_file.tell() == \
len(self.buffer_file.getvalue()):
# I'm doing this for the slightly better performance
self.seek = get | attr(self.file, "seek", None)
self.tell = getattr(self.file, "tell", None)
self.read = self.file.read
self.readline = self.file.readline
self.readlines = self.file.readlines
del self.buffer_file
def nobuffer(self):
"""tell the ReseekFile to stop using the buffer once it's exhausted"""
self._use_buffer = 0
def prepare_input_source(source):
"""given a URL, returns a xml.sax.xmlreader.InputSource
Works like xml.sax.saxutils.prepare_input_source. Wraps the
InputSource in a ReseekFile if the URL returns a non-seekable
file.
To turn the buffer off if that happens, you'll need to do
something like
f = source.getCharacterStream()
...
try:
f.nobuffer()
except AttributeError:
pass
or
if isinstance(f, ReseekFile):
f.nobuffer()
"""
from xml.sax import saxutils
source = saxutils.prepare_input_source(source)
# Is this correct? Don't know - don't have Unicode exprerience
f = source.getCharacterStream() or source.getByteStream()
try:
f.tell()
except (AttributeError, IOError):
f = ReseekFile.ReseekFile(f)
source.setByteStream(f)
source.setCharacterStream(None)
return source
|
joergdietrich/astropy | astropy/visualization/wcsaxes/tests/datasets.py | Python | bsd-3-clause | 1,655 | 0.000604 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Downloads the FITS files that are used in image testing and for building documentation.
"""
import time
from ....utils.data import download_file
from ....io import fits
__all__ = ['fetch_msx_hdu',
'fetch_rosat_hdu',
'fetch_twoMASS_k_hdu',
'fetch_l1448_co_hdu',
'fetch_bolocam_hdu',
]
MAX_RETRIES = 10
TIME_BETWEEN_RETRIES = 5
URL = 'http://data.astropy.org/'
def fetch_hdu(filename, cache=True):
    """Download a FITS file to the cache and open HDU 0.

    Parameters
    ----------
    filename : str
        Path of the file relative to ``URL``.
    cache : bool
        Whether to cache the downloaded file.

    Returns
    -------
    hdu : `~astropy.io.fits.ImageHDU`
        The first HDU of the downloaded FITS file.
    """
    # Local import: URLError was previously referenced without ever being
    # imported, so any download failure raised NameError instead of being
    # retried.
    from urllib.error import URLError

    for retry in range(MAX_RETRIES):
        try:
            path = download_file(URL + filename, cache=cache, timeout=30)
        except URLError:
            if retry == MAX_RETRIES - 1:
                # Out of retries: surface the original URLError.
                raise
            else:
                time.sleep(TIME_BETWEEN_RETRIES)
        else:
            break
    else:
        # Defensive: only reachable if the loop never breaks or raises.
        raise Exception("Failed to download file {0}".format(filename))

    return fits.open(path)[0]
def fetch_msx_hdu(cache=True):
    """Fetch the MSX example dataset HDU.

    Returns
    -------
    hdu : `~astropy.io.fits.ImageHDU`
        Image HDU
    """
    filename = 'galactic_center/gc_msx_e.fits'
    return fetch_hdu(filename, cache=cache)
def fetch_rosat_hdu(cache=True):
    """Fetch the ROSAT all-sky example dataset HDU.

    Returns
    -------
    hdu : `~astropy.io.fits.ImageHDU`
        Image HDU
    """
    filename = 'allsky/allsky_rosat.fits'
    return fetch_hdu(filename, cache=cache)
def fetch_twoMASS_k_hdu(cache=True):
    """Fetch the 2MASS K-band example dataset HDU.

    Returns
    -------
    hdu : `~astropy.io.fits.ImageHDU`
        Image HDU
    """
    filename = 'galactic_center/gc_2mass_k.fits'
    return fetch_hdu(filename, cache=cache)
def fetch_l1448_co_hdu(cache=True):
    """Fetch the L1448 13CO example dataset HDU.

    Returns
    -------
    hdu : `~astropy.io.fits.ImageHDU`
        Image HDU
    """
    filename = 'l1448/l1448_13co.fits'
    return fetch_hdu(filename, cache=cache)
def fetch_bolocam_hdu(cache=True):
    """Fetch the Bolocam Galactic Plane Survey example dataset HDU.

    Returns
    -------
    hdu : `~astropy.io.fits.ImageHDU`
        Image HDU
    """
    filename = 'galactic_center/gc_bolocam_gps.fits'
    return fetch_hdu(filename, cache=cache)
|
funkring/fdoo | addons-funkring/subscription_invoice/__init__.py | Python | agpl-3.0 | 36 | 0.027778 | import wizar | d
import | account_invoice |
cytec/SickRage | lib/validators/__init__.py | Python | gpl-3.0 | 551 | 0 | from .between import between # noqa
from .domain import d | omain # noqa
from .email import email # noqa
from .extremes import Max, Min # noqa
from | .i18n import fi_business_id, fi_ssn # noqa
from .iban import iban # noqa
from .ip_address import ipv4, ipv6 # noqa
from .length import length # noqa
from .mac_address import mac_address # noqa
from .slug import slug # noqa
from .truthy import truthy # noqa
from .url import url # noqa
from .utils import ValidationFailure, validator # noqa
from .uuid import uuid # noqa
__version__ = '0.10'
|
Elico-Corp/odoo-addons | website_recaptcha/models/__init__.py | Python | agpl-3.0 | 1,075 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#    Copyright (c) 2010-2014 Elico Corp. All Rights Reserved.
# Augustin Cisterne-Kaas <augustin.cisterne-kaas@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import res_config
|
westfeld/fritz-speed | create-rra.py | Python | mit | 1,072 | 0.009328 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
create-rra.py
creates RRD file with 60 seconds primary step length
four datasources which are counters
three RRA for one day, one week and one mont
"""
import fritzconnection
import r | rdtool
import os
from common import read_configuration
def get_link_speed():
    """Query the FritzBox (TR-064 API) for the DSL link speed.

    Returns a ``(upstream, downstream)`` tuple; the API reports bit
    rates, which are divided by 8 here to get bytes per second.
    """
    fc = fritzconnection.FritzConnection()
    status = fc.call_action('WANCommonInterfaceConfig', 'GetCommonLinkProperties')
    downstream = status['NewLayer1DownstreamMaxBitRate'] / 8.
    upstream = status['NewLayer1UpstreamMaxBitRate'] / 8.
    return (upstream, downstream)
def main():
    """Create the RRD database file configured in ``prefs``.

    Note: ``prefs`` is a module-level global set in the __main__ block below.
    """
    link_speeds = get_link_speed()
    # allow 10% headroom over the nominal link rate for the counter maxima
    max_speeds = tuple([speed*1.1 for speed in link_speeds])
    # 60 s base step; RRAs average 1-min samples for 1 day (1440 rows),
    # 10-min for 1 week (1008 rows) and 1-h for 5040 rows.
    # NOTE(review): 5040 hourly rows cover ~210 days, not "one month" as the
    # module docstring says -- confirm which is intended.
    rrdtool.create(prefs['rra_filename'], '--step', '60',
        'DS:bytes-up:COUNTER:500:0:'+str(max_speeds[0]),
        'DS:bytes-down:COUNTER:500:0:'+str(max_speeds[1]),
        'RRA:AVERAGE:0.8:1:1440',
        'RRA:AVERAGE:0.8:10:1008',
        'RRA:AVERAGE:0.8:60:5040' )
if __name__ == '__main__':
    # ``prefs`` is intentionally module-global so main() can reach it.
    prefs = read_configuration('fritz-speed.ini')
    main()
|
mopsalarm/pr0gramm-meta | update/main.py | Python | apache-2.0 | 13,461 | 0.001634 | #!/usr/bin/env python3
import argparse
import itertools
import queue
import re
import subprocess
import sys
import threading
import time
from collections import deque
from collections import namedtuple
from io import BytesIO
import datadog
import logbook
import pcc
import requests
from PIL import Image
from attrdict import AttrDict as attrdict
# noinspection PyUnresolvedReferences
import signal
logger = logbook.Logger("pr0gramm-meta")

logger.info("initialize datadog metrics")
datadog.initialize()

stats = datadog.ThreadStats()
stats.start()

# Lightweight records mirroring the pr0gramm API JSON objects.
Item = namedtuple("Item", ["id", "promoted", "up", "down",
                           "created", "image", "thumb", "fullsize", "source", "flags",
                           "user", "mark"])

Tag = namedtuple("Tag", ["id", "item_id", "confidence", "tag"])
User = namedtuple("User", ["id", "name", "registered", "score"])
def metric_name(suffix):
    """Prefix *suffix* with the common datadog metric namespace."""
    prefix = "pr0gramm.meta.update."
    return prefix + suffix
class SetQueue(queue.Queue):
    """A FIFO queue that silently drops items that are already queued.

    Uniqueness is decided by ``key(item)``; by default the item itself.
    """

    def __init__(self, maxsize=0, key=lambda x: x):
        super().__init__(maxsize)
        self.keyfunc = key

    # -- queue.Queue internal representation hooks --------------------

    def _init(self, maxsize):
        # backing deque plus a set of keys for O(1) duplicate checks
        self.keys = set()
        self.queue = deque()

    def _qsize(self):
        # the two structures must always stay in lock-step
        assert len(self.queue) == len(self.keys), "length of queue and keys not equal"
        return len(self.queue)

    def _put(self, item):
        key = self.keyfunc(item)
        if key in self.keys:
            return  # duplicate -- drop silently
        self.keys.add(key)
        self.queue.append(item)

    def _get(self):
        item = self.queue.popleft()
        self.keys.remove(self.keyfunc(item))
        return item
class UserSetQueue(SetQueue):
    """SetQueue of user names (case-insensitive uniqueness) that reports
    its current size to datadog on every put/get."""
    def __init__(self):
        super().__init__(key=str.lower)

    def _put(self, item):
        # low sample_rate keeps the metric overhead negligible
        stats.gauge(metric_name("queue.users"), len(self.keys), sample_rate=0.01)
        super()._put(item)

    def _get(self):
        stats.gauge(metric_name("queue.users"), len(self.keys), sample_rate=0.01)
        return super()._get()


# just put a user in this queue to download its details
user_queue = UserSetQueue()
def iterate_posts(start=None):
    """Yield Item tuples from the pr0gramm feed, paging backwards.

    Starts below *start* (or at the top of the feed when *start* is None)
    and keeps requesting older pages until the API reports the end.
    """
    base_url = "http://pr0gramm.com/api/items/get?flags=7"
    while True:
        url = base_url + "&older=%d" % start if start else base_url

        # :type: requests.Response
        with stats.timer(metric_name("request.feed")):
            response = requests.get(url)

        response.raise_for_status()
        json = response.json()
        for item in json["items"]:
            item = Item(**item)
            # track the smallest id seen so the next page continues there
            start = min(start or item.id, item.id)
            yield item

        if json["atEnd"]:
            break
def chunker(n, iterable):
    """Yield consecutive tuples of at most *n* items from *iterable*."""
    it = iter(iterable)
    # iter(callable, sentinel) keeps slicing until an empty tuple appears
    yield from iter(lambda: tuple(itertools.islice(it, n)), ())
@stats.timed(metric_name("request.user"))
def get_user_details(name):
    """Fetch a user's profile from the pr0gramm API and return a User tuple."""
    url = "http://pr0gramm.com/api/profile/info"
    response = requests.get(url, params={"name": name, "flags": "1"})
    content = response.json()
    user = attrdict(content).user

    # convert to named tuple
    return User(user.id, user.name, user.registered, user.score)
def store_user_details(database, details):
    """Upsert *details* into ``users`` and append a ``user_score`` sample."""
    with database, database.cursor() as cursor:
        # keep the base row current (score is the only mutable column here) ...
        cursor.execute("INSERT INTO users VALUES (%s, %s, %s, %s)"
                       " ON CONFLICT(id) DO UPDATE SET score=%s",
                       list(details) + [details.score])

        # ... and record a timestamped score sample for history
        cursor.execute("INSERT INTO user_score VALUES (%s, %s, %s)",
                       [details.id, int(time.time()), details.score])
def update_user_details(dbpool):
    """Worker loop: drain ``user_queue`` and persist each user's details.

    Runs forever; intended as a background thread target.
    """
    while True:
        user = user_queue.get()
        try:
            # noinspection PyTypeChecker
            with dbpool.active() as database:
                store_user_details(database, get_user_details(user))

            # simple rate limit between API calls
            time.sleep(1)
        except IOError:
            # best effort: drop this user on network/db trouble
            pass
@stats.timed(metric_name("request.size"), tags=["image"])
def get_image_size(image_url, size=1024):
    """Fetch the first *size* bytes of an image and return ``image.size``.

    Relies on PIL being able to parse dimensions from a partial file;
    PIL raises IOError when the prefix is too short, which callers use
    to retry with a larger *size*.
    """
    # :type: requests.Response
    response = requests.get(image_url, headers={"Range": "bytes=0-%d" % (size - 1)}, stream=True)
    response.raise_for_status()
    try:
        image = Image.open(response.raw)
        return image.size
    finally:
        # always release the streamed connection
        response.close()
@stats.timed(metric_name("request.size"), tags=["video"])
def get_video_size(video_url, size=16 * 1024):
    """Fetch the first *size* bytes of a video and probe (width, height)."""
    # :type: requests.Response
    response = requests.get(video_url, headers={"Range": "bytes=0-%d" % (size - 1)})
    response.raise_for_status()

    # ask avprobe for the size of the image
    process = subprocess.Popen(
        ["timeout", "10s", "ffprobe", "-"], shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    stdout, stderr = process.communicate(response.content)

    # and extract result from output
    # NOTE(review): if ffprobe prints no "Stream ... WxH" line, re.search
    # returns None and this raises AttributeError, which the caller's
    # (OSError, IOError) handler does not catch -- confirm intended.
    width, height = re.search(br"Stream.* ([0-9]+)x([0-9]+)", stdout + stderr).groups()
    return int(width), int(height)
def get_item_url(item):
    """Return the absolute URL for an item's media file."""
    image = item.image
    if image.startswith("//"):
        # protocol-relative URL
        return "http:" + image
    if image.startswith("http"):
        # already absolute
        return image
    if image.endswith((".mp4", ".webm")):
        # videos live on a dedicated host
        return "http://vid.pr0gramm.com/" + image
    return "http://img.pr0gramm.com/" + image
def get_item_size(item):
    """Determine (width, height) for an item's media file.

    Images are probed with progressively larger prefixes until PIL can
    parse a header; webm videos are probed via ffprobe.  Raises a generic
    Exception when every strategy fails.
    """
    filename = item.image.lower()
    url = get_item_url(item)
    if filename.endswith((".jpg", ".jpeg", ".png", ".gif")):
        # retry with a larger prefix on each IOError from PIL
        for byte_count in [1024, 4096, 8192, 16 * 1024, 64 * 1024]:
            try:
                width, height = get_image_size(url, size=byte_count)
                return width, height
            except IOError:
                pass

    if filename.endswith(".webm"):
        try:
            width, height = get_video_size(url)
            return width, height
        except (OSError, IOError):
            pass

    # NOTE(review): '.mp4' files are routed by get_item_url but never probed
    # here, so they always end up raising -- confirm whether that is intended.
    raise Exception("Could not get size of item {}".format(item.id))
def get_item_ids_in_table(db, items, table):
    """Return the set of ids from *items* that already exist in *table*.

    String interpolation is acceptable here because the ids are integers
    and *table* is always a literal supplied by this module.
    NOTE(review): an empty *items* produces "IN ()", which is invalid SQL --
    callers must pass at least one item.
    """
    ids = ",".join(str(item.id) for item in items)
    query = "SELECT id FROM %s WHERE id IN (%s)" % (table, ids)
    with db, db.cursor() as cursor:
        cursor.execute(query)
        return {item_id for item_id, in cursor}
def get_items_not_in_table(db, items, table):
    """Return the items from *items* whose ids are missing from *table*."""
    snapshot = tuple(items)
    known_ids = get_item_ids_in_table(db, snapshot, table)
    return [candidate for candidate in snapshot if candidate.id not in known_ids]
def update_item_sizes(database, items):
    """
    Downloads sizes for a list of items.
    :param database: A database connection to use for storing the items.
    :param tuple[items] items: The items to process
    """
    # get the items that need updates
    for item in get_items_not_in_table(database, items, "sizes"):
        # noinspection PyBroadException
        try:
            width, height = get_item_size(item)
        except KeyboardInterrupt:
            raise
        except:
            # best effort: log the failure and continue with the next item
            logger.exception()
            continue

        with database, database.cursor() as cursor:
            cursor.execute("INSERT INTO sizes VALUES (%s, %s, %s)"
                           " ON CONFLICT(id) DO NOTHING", (item.id, width, height))
def update_item_previews(database, items):
# get the items that need updates
for item in get_items_not_in_table(database, items, "item_previews"):
# noinspection PyBroadException
try:
filename = item.image.lower()
url = get_item_url(item)
logger.debug("Update preview for {}", url)
# generate thumbnail
png_bytes = subprocess.check_output([
"timeout", "10s",
"ffmpeg", "-loglevel", "panic", "-y", "-i", url,
"-vf", "scale=8:-1", "-frames", "1",
"-f", "image2", "-vcodec", "png", "-"])
image = Image.open(BytesIO(png_bytes)).convert("RGB")
width, height = image.size
preview = bytearray()
for r, g, b in image.getdata():
# convert to rgb |
centrumholdings/buildbot | buildbot/status/web/authz.py | Python | gpl-2.0 | 2,513 | 0.003979 | from buildbot.status.web.auth import IAuth
class Authz(object):
    """Decide who can do what.

    Each known action maps to a policy value: False (deny), True (allow
    anyone), the string 'auth' (allow any authenticated user), or a
    callable ``f(user, *args)`` consulted after authentication.
    """
    knownActions = [
        # If you add a new action here, be sure to also update the documentation
        # at docs/cfg-statustargets.texinfo
        'gracefulShutdown',
        'forceBuild',
        'forceAllBuilds',
        'pingBuilder',
        'stopBuild',
        'stopAllBuilds',
        'cancelPendingBuild',
    ]

    def __init__(self,
                 default_action=False,
                 auth=None,
                 **kwargs):
        # *auth* must provide IAuth; per-action keyword arguments override
        # the default policy.
        self.auth = auth
        if auth:
            assert IAuth.providedBy(auth)

        self.config = dict( (a, default_action) for a in self.knownActions )
        for act in self.knownActions:
            if act in kwargs:
                self.config[act] = kwargs[act]
                del kwargs[act]

        # anything left over is a typo or unknown action name
        if kwargs:
            raise ValueError("unknown authorization action(s) " + ", ".join(kwargs.keys()))

    def advertiseAction(self, action):
        """Should the web interface even show the form for ACTION?"""
        if action not in self.knownActions:
            raise KeyError("unknown action")
        cfg = self.config.get(action, False)
        if cfg:
            return True
        return False

    def needAuthForm(self, action):
        """Does this action require an authentication form?"""
        if action not in self.knownActions:
            raise KeyError("unknown action")
        cfg = self.config.get(action, False)
        if cfg == 'auth' or callable(cfg):
            return True
        return False

    def actionAllowed(self, action, request, *args):
        """Is this ACTION allowed, given this http REQUEST?"""
        if action not in self.knownActions:
            raise KeyError("unknown action")
        cfg = self.config.get(action, False)
        if cfg:
            if cfg == 'auth' or callable(cfg):
                if not self.auth:
                    return False
                # credentials arrive as plain request arguments; missing
                # values fall back to sentinel strings that always deny
                user = request.args.get("username", ["<unknown>"])[0]
                passwd = request.args.get("passwd", ["<no-password>"])[0]
                if user == "<unknown>" or passwd == "<no-password>":
                    return False
                if self.auth.authenticate(user, passwd):
                    # a callable policy gets the final say per-user
                    if callable(cfg) and not cfg(user, *args):
                        return False
                    return True
                return False
            else:
                return True # anyone can do this..
|
sigopt/sigopt-python | test/orchestrate/services/aws_provider_bag_test.py | Python | mit | 1,060 | 0.00283 | import pytest
from mock import Mock
from sigopt.orchestrate.services.aws_provider_bag import AwsProviderServiceBag
class TestOrchestrateServiceBag(object):
    """Smoke test: the AWS provider bag constructs every service wrapper
    and each wrapper exposes its boto client/resource attributes."""
    @pytest.fixture
    def orchestrate_services(self):
        # the bag only stores this reference, so a plain Mock suffices
        return Mock()

    def test_orchestrate_service_bag(self, orchestrate_services):
        services = AwsProviderServiceBag(orchestrate_services)
        assert services.cloudformation_service is not None
        assert services.cloudformation_service.client is not None
        assert services.cloudformation_service.cloudformation is not None
        assert services.ec2_service is not None
        assert services.ec2_service.ec2 is not None
        assert services.ecr_service is not None
        assert services.ecr_service.client is not None
        assert services.eks_service is not None
        assert services.eks_service.client is not None
        assert services.iam_service is not None
        assert services.iam_service.client is not None
        assert services.iam_service.iam is not None
        assert services.sts_service is not None
        assert services.sts_service.client is not None
|
nis-sdn/odenos | src/main/python/org/o3project/odenos/core/component/conversion_table.py | Python | apache-2.0 | 7,233 | 0.000138 | # -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
class ConversionTable(object):
    """Bidirectional mapping between original and federated network
    components (networks, nodes, ports, links and flows).

    Composite keys use the form ``"<network_id>::<id>"``; every entry is
    stored in both directions, so lookups work from either side.
    """

    def __init__(self):
        self.__connection_type_map = {}
        self.__network_conversion_table = {}
        self.__node_conversion_table = {}
        self.__port_conversion_table = {}
        self.__link_conversion_table = {}
        self.__flow_conversion_table = {}

    def get_connection_type(self, connection_id):
        """Return the type registered for *connection_id*, or None."""
        if connection_id in self.__connection_type_map:
            return self.__connection_type_map[connection_id]
        return None

    def get_connection_list(self, connection_type):
        """Return all connection ids registered with *connection_type*."""
        connection_ids = []
        for k_conn_id, v_conn_type in self.__connection_type_map.items():
            if connection_type == v_conn_type:
                connection_ids.append(k_conn_id)
        return connection_ids

    def is_connection_type(self, connection_type):
        """Return True when at least one connection has *connection_type*."""
        if connection_type is None or\
           len(self.get_connection_list(connection_type)) == 0:
            return False
        return True

    def add_entry_connection_type(self, connection_id, connection_type):
        """Register (or overwrite) the type of *connection_id*."""
        self.__connection_type_map[connection_id] = connection_type

    def del_entry_connection_type(self, connection_id):
        """Remove *connection_id* from the type map (no-op if absent)."""
        # 'in' instead of the Python-2-only dict.has_key(), which raised
        # AttributeError on Python 3 and was inconsistent with the 'in'
        # checks used everywhere else in this class.
        if connection_id in self.__connection_type_map:
            del self.__connection_type_map[connection_id]

    def get_network(self, network_id):
        """Return the list of network ids mapped to *network_id*."""
        networks = []
        if network_id in self.__network_conversion_table:
            networks = self.__network_conversion_table[network_id]
        return networks

    def get_node(self, network_id, node_id):
        """Return the list of "<nw>::<node>" keys mapped to this node."""
        nodes = []
        key = network_id + "::" + node_id
        if key in self.__node_conversion_table:
            nodes = self.__node_conversion_table[key]
        return nodes

    def get_port(self, network_id, node_id, port_id):
        """Return the list of "<nw>::<node>::<port>" keys mapped to this port."""
        ports = []
        key = network_id + "::" + node_id + "::" + port_id
        if key in self.__port_conversion_table:
            ports = self.__port_conversion_table[key]
        return ports

    def get_link(self, network_id, link_id):
        """Return the list of "<nw>::<link>" keys mapped to this link."""
        links = []
        key = network_id + "::" + link_id
        if key in self.__link_conversion_table:
            links = self.__link_conversion_table[key]
        return links

    def get_flow(self, network_id, flow_id):
        """Return the list of "<nw>::<flow>" keys mapped to this flow."""
        flows = []
        key = network_id + "::" + flow_id
        if key in self.__flow_conversion_table:
            flows = self.__flow_conversion_table[key]
        return flows

    def add_entry_network(self, nwc_id_1, nwc_id_2):
        """Map two network component ids to each other."""
        self.__add_entry_object(self.__network_conversion_table,
                                nwc_id_1,
                                nwc_id_2)

    def add_entry_node(self, org_nwc_id, org_node_id,
                       rep_nwc_id, rep_node_id):
        """Map an original node to its replacement node."""
        key = org_nwc_id + "::" + org_node_id
        value = rep_nwc_id + "::" + rep_node_id
        self.__add_entry_object(self.__node_conversion_table,
                                key,
                                value)

    def add_entry_port(self, org_nwc_id, org_node_id, org_port_id,
                       rep_nwc_id, rep_node_id, rep_port_id):
        """Map an original port to its replacement port."""
        key = org_nwc_id + "::" + org_node_id + "::" + org_port_id
        value = rep_nwc_id + "::" + rep_node_id + "::" + rep_port_id
        self.__add_entry_object(self.__port_conversion_table,
                                key,
                                value)

    def add_entry_link(self, org_nwc_id, org_link_id,
                       rep_nwc_id, rep_link_id):
        """Map an original link to its replacement link."""
        key = org_nwc_id + "::" + org_link_id
        value = rep_nwc_id + "::" + rep_link_id
        self.__add_entry_object(self.__link_conversion_table,
                                key,
                                value)

    def add_entry_flow(self, org_nwc_id, org_flow_id,
                       rep_nwc_id, rep_flow_id):
        """Map an original flow to its replacement flow."""
        key = org_nwc_id + "::" + org_flow_id
        value = rep_nwc_id + "::" + rep_flow_id
        self.__add_entry_object(self.__flow_conversion_table,
                                key,
                                value)

    def __add_entry_object(self, conv_table_obj, key, value):
        """Insert key<->value into *conv_table_obj* in both directions."""
        # key setting
        if key not in conv_table_obj:
            conv_table_obj[key] = []
        conv_table_obj[key].append(value)

        # value -> key setting(reverse setting)
        if value not in conv_table_obj:
            conv_table_obj[value] = []
        conv_table_obj[value].append(key)

    def del_entry_network(self, key):
        """Remove a network mapping (both directions)."""
        self.__del_entry_object(self.__network_conversion_table, key)

    def del_entry_node(self, network_id, node_id):
        """Remove a node mapping and every port mapping under that node."""
        # delete Port => Node.
        del_port_list = []
        for port_id in self.__port_conversion_table:
            port_list = port_id.split("::")
            if port_list[0] == network_id and\
               port_list[1] == node_id:
                del_port_list.append(port_id)

        # collect first, then delete: the dict must not change while iterating
        for port_id in del_port_list:
            self.__del_entry_object(self.__port_conversion_table,
                                    port_id)

        key = network_id + "::" + node_id
        self.__del_entry_object(self.__node_conversion_table, key)

    def del_entry_port(self, network_id, node_id, port_id):
        """Remove a port mapping (both directions)."""
        key = network_id + "::" + node_id + "::" + port_id
        self.__del_entry_object(self.__port_conversion_table, key)

    def del_entry_link(self, network_id, link_id):
        """Remove a link mapping (both directions)."""
        key = network_id + "::" + link_id
        self.__del_entry_object(self.__link_conversion_table, key)

    def del_entry_flow(self, network_id, flow_id):
        """Remove a flow mapping (both directions)."""
        key = network_id + "::" + flow_id
        self.__del_entry_object(self.__flow_conversion_table, key)

    def __del_entry_object(self, conv_table_obj, key):
        """Remove *key* and its reverse references from *conv_table_obj*."""
        if key not in conv_table_obj:
            return

        # value -> key remove(reverse setting remove)
        reverse_keys = conv_table_obj[key]
        for reverse_key in reverse_keys:
            if reverse_key not in conv_table_obj:
                continue
            if len(conv_table_obj[reverse_key]) > 1:
                # other mappings still reference it: only drop this back-link
                conv_table_obj[reverse_key].remove(key)
                continue
            del conv_table_obj[reverse_key]

        del conv_table_obj[key]
|
pstreck/django-videokit | videokit/fields.py | Python | mit | 9,739 | 0.006366 | from django.conf import settings
from django.core.files import File
from django.db.models.fields.files import FieldFile
from django.db.models.fields.files import FileDescriptor
from datetime import datetime
import os.path
import subprocess
from videokit.apps import VideokitConfig
from videokit.tasks import generate_video
def get_video_dimensions(file):
    """Return (width, height) of a video via two ``mediainfo`` probes.

    Returns (None, None) when the file is missing, mediainfo fails, or
    its output cannot be parsed.
    """
    path = os.path.join(settings.MEDIA_ROOT, file.name)

    if os.path.isfile(path):
        try:
            process = subprocess.Popen(
                ['mediainfo', '--Inform=Video;%Width%', path],
                stdout = subprocess.PIPE, stderr = subprocess.PIPE)

            stdout, stderr = process.communicate()

            if process.wait() == 0:
                width = int(stdout.decode('utf8').strip(' \t\n\r'))
            else:
                # Previously returned (0, 0) here while every other failure
                # path returned (None, None); unified on (None, None).
                return (None, None)

            process = subprocess.Popen(
                ['mediainfo', '--Inform=Video;%Height%', path],
                stdout = subprocess.PIPE, stderr = subprocess.PIPE)

            stdout, stderr = process.communicate()

            if process.wait() == 0:
                height = int(stdout.decode('utf8').strip(' \t\n\r'))
            else:
                return (None, None)

            return (width, height)
        except (OSError, ValueError):
            # ValueError covers unparsable mediainfo output, matching how
            # the sibling get_video_rotation() treats it as a failure.
            pass

    return (None, None)
def get_video_rotation(file):
    """Return the rotation (degrees, float) reported by ``mediainfo``.

    Falls back to 0.0 when the file is missing or probing fails.
    """
    path = os.path.join(settings.MEDIA_ROOT, file.name)

    if os.path.isfile(path):
        try:
            process = subprocess.Popen(
                ['mediainfo', '--Inform=Video;%Rotation%', path],
                stdout = subprocess.PIPE, stderr = subprocess.PIPE)

            stdout, stderr = process.communicate()

            if process.wait() == 0:
                try:
                    rotation = float(stdout.decode('utf8').strip(' \t\n\r'))
                except ValueError:
                    # empty/unparsable output means no rotation metadata
                    rotation = 0.0

                return rotation
        except OSError:
            pass

    return 0.0
def get_video_mimetype(file):
    """Return the internet media type reported by ``mediainfo``.

    Normalizes 'video/H264' and empty output to 'video/mp4'; returns ''
    when the file is missing or probing fails.
    """
    path = os.path.join(settings.MEDIA_ROOT, file.name)

    if os.path.isfile(path):
        try:
            process = subprocess.Popen(
                ['mediainfo', '--Inform=Video;%InternetMediaType%', path],
                stdout = subprocess.PIPE, stderr = subprocess.PIPE)

            stdout, stderr = process.communicate()

            if process.wait() == 0:
                mimetype = stdout.decode('utf8').strip(' \t\n\r')

                # browsers expect video/mp4, not the raw codec type
                if mimetype == 'video/H264':
                    mimetype = 'video/mp4'

                if mimetype == '':
                    mimetype = 'video/mp4'

                return mimetype
        except OSError:
            pass

    return ''
def get_video_duration(file):
    """Return the duration (milliseconds, int) reported by ``mediainfo``.

    Falls back to 0 when the file is missing or probing fails.
    NOTE(review): mediainfo's %Duration% is in milliseconds -- confirm
    callers expect that unit.
    """
    path = os.path.join(settings.MEDIA_ROOT, file.name)

    if os.path.isfile(path):
        try:
            process = subprocess.Popen(
                ['mediainfo', '--Inform=Video;%Duration%', path],
                stdout = subprocess.PIPE, stderr = subprocess.PIPE)

            stdout, stderr = process.communicate()

            if process.wait() == 0:
                try:
                    duration = int(stdout.decode('utf8').strip(' \t\n\r'))
                except ValueError:
                    # empty/unparsable output means no duration metadata
                    duration = 0

                return duration
        except OSError:
            pass

    return 0
def get_video_thumbnail(file):
    """Extract the first frame as ``<name>.thumb.jpg`` next to the media file.

    Returns the thumbnail's media-relative name, or '' on failure.
    """
    path = os.path.join(settings.MEDIA_ROOT, file.name)

    thumbnail_name = '%s%s' % (file.name, '.thumb.jpg')
    thumbnail_path = os.path.join(settings.MEDIA_ROOT, thumbnail_name)

    if os.path.isfile(path):
        try:
            # -frames 1 grabs a single frame; -y overwrites any stale thumbnail
            process = subprocess.Popen(
                ['ffmpeg', '-i', path, '-frames', '1', '-y', thumbnail_path],
                stdout = subprocess.PIPE, stderr = subprocess.PIPE)

            if process.wait() == 0:
                return thumbnail_name
        except OSError:
            pass

    return ''
class VideoFile(File):
    """Django File subclass exposing lazily probed video metadata
    (dimensions, rotation, mimetype, duration, thumbnail).

    Each probe shells out to an external tool (mediainfo/ffmpeg), so
    results are cached on first access; VideoFieldFile.delete() drops
    the caches.
    """
    def _get_width(self):
        return self._get_video_dimensions()[0]
    width = property(_get_width)

    def _get_height(self):
        return self._get_video_dimensions()[1]
    height = property(_get_height)

    def _get_rotation(self):
        return self._get_video_rotation()
    rotation = property(_get_rotation)

    def _get_mimetype(self):
        return self._get_video_mimetype()
    mimetype = property(_get_mimetype)

    def _get_duration(self):
        return self._get_video_duration()
    duration = property(_get_duration)

    def _get_thumbnail(self):
        return self._get_video_thumbnail()
    thumbnail = property(_get_thumbnail)

    def _get_video_dimensions(self):
        # cache: probing spawns a subprocess
        if not hasattr(self, '_dimensions_cache'):
            self._dimensions_cache = get_video_dimensions(self)
        return self._dimensions_cache

    def _get_video_rotation(self):
        if not hasattr(self, '_rotation_cache'):
            self._rotation_cache = get_video_rotation(self)
        return self._rotation_cache

    def _get_video_mimetype(self):
        if not hasattr(self, '_mimetype_cache'):
            self._mimetype_cache = get_video_mimetype(self)
        return self._mimetype_cache

    def _get_video_duration(self):
        if not hasattr(self, '_duration_cache'):
            self._duration_cache = get_video_duration(self)
        return self._duration_cache

    def _get_video_thumbnail(self):
        # note: first access actually generates the thumbnail on disk
        if not hasattr(self, '_thumbnail_cache'):
            self._thumbnail_cache = get_video_thumbnail(self)
        return self._thumbnail_cache
class VideoFileDescriptor(FileDescriptor):
    """Field descriptor that refreshes dependent model fields whenever the
    video file attribute is reassigned."""
    def __set__(self, instance, value):
        previous_file = instance.__dict__.get(self.field.name)
        super(VideoFileDescriptor, self).__set__(instance, value)

        # if a file was already assigned, force every derived field to be
        # recomputed from the new value
        if previous_file is not None:
            self.field.update_dimension_fields(instance, force = True)
            self.field.update_rotation_field(instance, force = True)
            self.field.update_mimetype_field(instance, force = True)
            self.field.update_duration_field(instance, force = True)
            self.field.update_thumbnail_field(instance, force = True)
class VideoFieldFile(VideoFile, FieldFile):
    """Model-field-attached video file; drops cached metadata on delete."""
    def delete(self, save = True):
        # invalidate all lazily computed metadata before removing the file
        if hasattr(self, '_dimensions_cache'):
            del self._dimensions_cache

        if hasattr(self, '_rotation_cache'):
            del self._rotation_cache

        if hasattr(self, '_mimetype_cache'):
            del self._mimetype_cache

        if hasattr(self, '_duration_cache'):
            del self._duration_cache

        if hasattr(self, '_thumbnail_cache'):
            del self._thumbnail_cache

        super(VideoFieldFile, self).delete(save)
class VideoSpecFieldFile(VideoFieldFile):
def _require_file(self):
if not self.source_file:
raise ValueError('The \'%s\' attribute\'s source has no file associated with it.' % self.field_name)
else:
self.validate()
def delete(self, save = True):
if hasattr(self, '_generated_cache'):
del self._generated_cache
super(VideoSpecFieldFile, self).delete(save)
def validate(self):
return self.field.video_cache_backend.validate(self)
def invalidate(self):
return self.field.video_cache_backend.invalidate(self)
def clear(self):
return self.field.video_cache_backend.clear(self)
def generate(self):
if not self.generating() and not self.generated():
file_name = self.generate_file_name()
options = []
if self.field.format == 'mp4':
options = ['-c:v', 'libx264', '-c:a', 'libfdk_aac', '-b:v', '1M', '-b:a', '128k']
elif self.field.format == 'ogg':
options = ['-c:v', 'libtheora', '-c:a', 'libvorbis', '-q:v', '10', '-q:a', '6']
elif self.field.format == 'webm':
options = ['-c:v', 'libvpx', '-c:a', 'libvorbis', '-crf', '10', '-b:v', '1M']
self.name = file_name
self.instance.save()
generate_video.delay(file_name, self.source_file.name, options = options)
def generating(self):
if self.name:
base = getat |
KrzysztofStachanczyk/Sensors-WWW-website | www/env/lib/python2.7/site-packages/django/core/files/uploadhandler.py | Python | gpl-3.0 | 6,876 | 0.001454 | """
Base file upload handler classes, and the built-in concrete subclasses
"""
from __future__ import unicode_literals
from io import BytesIO
from django.conf import settings
from django.core.files.uploadedfile import (
InMemoryUploadedFile, TemporaryUploadedFile,
)
from django.utils.encoding import python_2_unicode_compatible
from django.utils.module_loading import import_string
__all__ = [
'UploadFileException', 'StopUpload', 'SkipFile', 'FileUploadHandler',
'TemporaryFileUploadHandler', 'MemoryFileUploadHandler', 'load_handler',
'StopFutureHandlers'
]
class UploadFileException(Exception):
"""
Any error having to do with uploading files.
"""
pass
@python_2_unicode_compatible
class StopUpload(UploadFileException):
"""
This exception is raised when an upload must abort.
"""
def __init__(self, connection_reset=False):
"""
If ``connection_reset`` is ``True``, Django knows will halt the upload
without consuming the rest of the upload. This will cause the browser to
| show a "connection reset" error.
"""
self.connection_reset = connection_reset
def __str__(self):
if self.connection_reset:
return 'StopUpload: Halt current upload.'
else:
return 'StopUpload: Consume request | data, then halt.'
class SkipFile(UploadFileException):
"""
This exception is raised by an upload handler that wants to skip a given file.
"""
pass
class StopFutureHandlers(UploadFileException):
"""
Upload handers that have handled a file and do not want future handlers to
run should raise this exception instead of returning None.
"""
pass
class FileUploadHandler(object):
"""
Base class for streaming upload handlers.
"""
chunk_size = 64 * 2 ** 10 # : The default chunk size is 64 KB.
def __init__(self, request=None):
self.file_name = None
self.content_type = None
self.content_length = None
self.charset = None
self.content_type_extra = None
self.request = request
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Handle the raw input from the client.
Parameters:
:input_data:
An object that supports reading via .read().
:META:
``request.META``.
:content_length:
The (integer) value of the Content-Length header from the
client.
:boundary: The boundary from the Content-Type header. Be sure to
prepend two '--'.
"""
pass
def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None):
"""
Signal that a new file has been started.
Warning: As with any data from the client, you should not trust
content_length (and sometimes won't even get it).
"""
self.field_name = field_name
self.file_name = file_name
self.content_type = content_type
self.content_length = content_length
self.charset = charset
self.content_type_extra = content_type_extra
def receive_data_chunk(self, raw_data, start):
"""
Receive data from the streamed upload parser. ``start`` is the position
in the file of the chunk.
"""
raise NotImplementedError('subclasses of FileUploadHandler must provide a receive_data_chunk() method')
def file_complete(self, file_size):
"""
Signal that a file has completed. File size corresponds to the actual
size accumulated by all the chunks.
Subclasses should return a valid ``UploadedFile`` object.
"""
raise NotImplementedError('subclasses of FileUploadHandler must provide a file_complete() method')
def upload_complete(self):
"""
Signal that the upload is complete. Subclasses should perform cleanup
that is necessary for this handler.
"""
pass
class TemporaryFileUploadHandler(FileUploadHandler):
"""
Upload handler that streams data into a temporary file.
"""
def __init__(self, *args, **kwargs):
super(TemporaryFileUploadHandler, self).__init__(*args, **kwargs)
def new_file(self, *args, **kwargs):
"""
Create the file object to append to as data is coming in.
"""
super(TemporaryFileUploadHandler, self).new_file(*args, **kwargs)
self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra)
def receive_data_chunk(self, raw_data, start):
self.file.write(raw_data)
def file_complete(self, file_size):
self.file.seek(0)
self.file.size = file_size
return self.file
class MemoryFileUploadHandler(FileUploadHandler):
"""
File upload handler to stream uploads into memory (used for small files).
"""
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Use the content_length to signal whether or not this handler should be in use.
"""
# Check the content-length header to see if we should
# If the post is too large, we cannot use the Memory handler.
if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
self.activated = False
else:
self.activated = True
def new_file(self, *args, **kwargs):
super(MemoryFileUploadHandler, self).new_file(*args, **kwargs)
if self.activated:
self.file = BytesIO()
raise StopFutureHandlers()
def receive_data_chunk(self, raw_data, start):
"""
Add the data to the BytesIO file.
"""
if self.activated:
self.file.write(raw_data)
else:
return raw_data
def file_complete(self, file_size):
"""
Return a file object if we're activated.
"""
if not self.activated:
return
self.file.seek(0)
return InMemoryUploadedFile(
file=self.file,
field_name=self.field_name,
name=self.file_name,
content_type=self.content_type,
size=file_size,
charset=self.charset,
content_type_extra=self.content_type_extra
)
def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> from django.http import HttpRequest
>>> request = HttpRequest()
>>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
return import_string(path)(*args, **kwargs)
|
Diti24/python-ivi | ivi/agilent/agilent34410A.py | Python | mit | 4,335 | 0.008535 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of t | he Software, and to permit persons | to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import struct
from .. import ivi
from .. import dmm
from .. import scpi
class agilent34410A(scpi.dmm.Base):
    "Agilent 34410A IVI DMM driver"

    def __init__(self, *args, **kwargs):
        self.__dict__.setdefault('_instrument_id', '34410A')

        super(agilent34410A, self).__init__(*args, **kwargs)

        # Number of internal *SAV/*RCL state-storage slots on the 34410A/11A.
        self._memory_size = 5

        self._identity_description = "Agilent 34410A/11A IVI DMM driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "Agilent Technologies"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 4
        self._identity_specification_minor_version = 1
        self._identity_supported_instrument_models = ['34410A', '34411A']

        self._add_method('memory.save',
                        self._memory_save)
        self._add_method('memory.recall',
                        self._memory_recall)
        self._add_method('memory.set_name',
                        self._set_memory_name)
        self._add_method('memory.get_name',
                        self._get_memory_name)

    def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
        "Opens an I/O session to the instrument."
        super(agilent34410A, self)._initialize(resource, id_query, reset, **keywargs)

        # interface clear
        if not self._driver_operation_simulate:
            self._clear()

        # check ID
        if id_query and not self._driver_operation_simulate:
            inst_id = self.identity.instrument_model
            id_check = self._instrument_id
            id_short = inst_id[:len(id_check)]
            if id_short != id_check:
                # Bug fix: Exception() does not apply printf-style arguments
                # the way logging calls do, so format the message eagerly.
                raise Exception("Instrument ID mismatch, expecting %s, got %s" % (id_check, id_short))

        # reset
        if reset:
            self.utility.reset()

    def _check_memory_index(self, index):
        "Validate a 1-based memory slot index and return it as an int."
        index = int(index)
        if index < 1 or index > self._memory_size:
            # Bug fix: the exception class lives in the ivi module; the bare
            # name raised a NameError instead of the intended range error.
            raise ivi.OutOfRangeException()
        return index

    def _memory_save(self, index):
        "Save the current instrument state into memory slot *index* (*SAV)."
        index = self._check_memory_index(index)
        if not self._driver_operation_simulate:
            self._write("*sav %d" % index)

    def _memory_recall(self, index):
        "Recall the instrument state stored in memory slot *index* (*RCL)."
        index = self._check_memory_index(index)
        if not self._driver_operation_simulate:
            self._write("*rcl %d" % index)

    def _get_memory_name(self, index):
        "Return the user-assigned name of memory slot *index*."
        index = self._check_memory_index(index)
        if not self._driver_operation_simulate:
            return self._ask("memory:state:name? %d" % index).strip(' "')

    def _set_memory_name(self, index, value):
        "Assign name *value* to memory slot *index*."
        index = self._check_memory_index(index)
        value = str(value)
        if not self._driver_operation_simulate:
            self._write("memory:state:name %d, \"%s\"" % (index, value))
|
ojotoxy/iTerm2 | api/examples/python/remote_control.py | Python | gpl-2.0 | 3,599 | 0.012781 | #!/usr/bin/python
# This is python 2.7 on macOS 10.12.
from __future__ import print_function
import api_pb2
import sys
import thread
import time
import websocket
callbacks = []
DEBUG=0
def SendRPC(ws, message, callback):
    """Serialize *message*, send it over *ws*, and queue *callback* for the reply."""
    if DEBUG > 0:
        print(">>> " + str(message))
    payload = message.SerializeToString()
    ws.send(payload, opcode=websocket.ABNF.OPCODE_BINARY)
    # Replies arrive in request order, so a FIFO list of callbacks suffices.
    callbacks.append(callback)
def handle_notification(ws, notification):
    """Dispatch a server-initiated Notification protobuf to its handler."""
    def handle_custom_escape_sequence_notification(custom_escape_sequence_notification):
        # -- Your logic goes here --
        print(custom_escape_sequence_notification.sender_identity + " sends message " + custom_escape_sequence_notification.payload)
    def handle_new_session_notification(new_session_notification):
        # Newly created sessions must be subscribed individually.
        subscribe_to_custom_escape_sequence(ws, new_session_notification.uniqueIdentifier)
    if notification.HasField('custom_escape_sequence_notification'):
        handle_custom_escape_sequence_notification(notification.custom_escape_sequence_notification)
    elif notification.HasField('new_session_notification'):
        handle_new_session_notification(notification.new_session_notification)
def handle_notification_response(response):
    """Check the server's reply to a notification subscription request."""
    if not response.HasField('notification_response'):
        print("Malformed notification response")
        print(str(response))
        return
    status = response.notification_response.status
    if status != api_pb2.NotificationResponse.OK:
        print("Bad status in notification response")
        print(str(response))
        return
def subscribe_to_new_sessions(ws):
    """Ask iTerm2 to notify us whenever a new session is created."""
    req = api_pb2.Request()
    req.notification_request.subscribe = True
    req.notification_request.notification_type = api_pb2.NOTIFY_ON_NEW_SESSION
    SendRPC(ws, req, handle_notification_response)
def subscribe_to_custom_escape_sequence(ws, session):
    """Subscribe to custom escape-sequence notifications for one *session*."""
    req = api_pb2.Request()
    req.notification_request.subscribe = True
    req.notification_request.session = session
    req.notification_request.notification_type = api_pb2.NOTIFY_ON_CUSTOM_ESCAPE_SEQUENCE
    SendRPC(ws, req, handle_notification_response)
def main(argv):
    """Connect to the iTerm2 API websocket and print custom escape sequences."""
    def on_message(ws, message):
        # Every server frame is a protobuf Response.
        response = api_pb2.Response()
        response.ParseFromString(message)
        if DEBUG > 0:
            print("<<< " + str(response))
        if response.HasField('notification'):
            handle_notification(ws, response.notification)
        else:
            # Non-notification frames are replies to our own requests;
            # dispatch to the oldest queued callback (FIFO matches SendRPC).
            global callbacks
            callback = callbacks[0]
            del callbacks[0]
            callback(response)
    def on_error(ws, error):
        print("Error: " + str(error))
    def on_close(ws):
        print("Connection closed")
    def on_open(ws):
        # On connect: watch for sessions created later, and subscribe to the
        # custom escape sequences of every session that already exists.
        def list_sessions(ws):
            def callback(response):
                for window in response.list_sessions_response.windows:
                    for tab in window.tabs:
                        for session in tab.sessions:
                            subscribe_to_custom_escape_sequence(ws, session.uniqueIdentifier)
            request = api_pb2.Request()
            request.list_sessions_request.SetInParent()
            SendRPC(ws, request, callback)
        subscribe_to_new_sessions(ws)
        list_sessions(ws)
    #websocket.enableTrace(True)
    # iTerm2's API server listens on localhost:1912 and requires the
    # 'api.iterm2.com' subprotocol.
    ws = websocket.WebSocketApp("ws://localhost:1912/",
                              on_message = on_message,
                              on_error = on_error,
                              on_close = on_close,
                              subprotocols = [ 'api.iterm2.com' ])
    ws.on_open = on_open
    ws.run_forever()
if __name__ == "__main__":
main(sys.argv)
|
MultimediaSemantics/entity2vec | scripts/old/page_links_to_edge_list_wiki.py | Python | apache-2.0 | 3,039 | 0.015466 | import optparse
import pickle
#converts urls to wiki_id
# Parse the input (.ttl page links) and output (edge list) file paths from
# the command line, prompting interactively when either is missing.
parser = optparse.OptionParser()
parser.add_option('-i','--input', dest = 'input_file', help = 'input_file')
parser.add_option('-o','--output', dest = 'output_file', help = 'output_file')
(options, args) = parser.parse_args()
if options.input_file is None:
    options.input_file = raw_input('Enter input file:')
if options.output_file is None:
    options.output_file = raw_input('Enter output file:')
input_file = options.input_file
output_file = options.output_file
#define the dictionary url:wiki_id
# Each non-comment triple line looks like: <url> <predicate> "<wiki_id>"...;
# the id is the quoted token of the third field.
wiki_from_url_dict = {}
with open('../../datasets/dbpedia/page_ids_en_2016.ttl','r') as f:
    for line in f:
        line = line.split(' ')
        if line[0] == '#':
            continue
        url = line[0]
        wiki_id_list = line[2].split('\"')
        wiki_id = wiki_id_list[1]
        print(url, wiki_id)
        wiki_from_url_dict[url] = int(wiki_id)
output_file_write = open(output_file,'w')
#iterate through the page links and turn urls into wiki_ids
# URLs with no official wiki id get synthetic ids starting just above the
# largest real id; local_id remembers those assignments so each unknown URL
# keeps a stable id across occurrences.
max_wiki_id = max(wiki_from_url_dict.values()) + 1
local_id = {}
count = 0
with open(input_file) as page_links:
    for line in page_links:
        line = line.split(' ')
        if line[0] == '#':
            continue
        url_1 = line[0]
        url_2 = line[2]
        #if wiki_id not found, assign an id = max_wiki_id and increment max_wiki_id
        try:
            wiki_id1 = wiki_from_url_dict[url_1] #first entity has wiki_id
            try:
                wiki_id2 = wiki_from_url_dict[url_2] #first and second entities have wiki_ids
            except (KeyError, IndexError): #first entity has wiki_id, second entity doesn't
                try: #check if a local id has already been assigned
                    wiki_id2 = local_id[url_2]
                except (KeyError, IndexError):
                    wiki_id2 = max_wiki_id
                    local_id[url_2] = wiki_id2
                    max_wiki_id +=1
        except (KeyError, IndexError): #first entity doesn't have wiki_id
            try:
                wiki_id1 = local_id[url_1]
            except (KeyError, IndexError):
                wiki_id1 = max_wiki_id
                local_id[url_1] = wiki_id1
                max_wiki_id += 1
            try: #first entity doesn't have wiki_id, second entity has it
                wiki_id2 = wiki_from_url_dict[url_2]
            except (KeyError, IndexError): #neither first nor second entity have wiki_ids
                try: #check if a local id has already been assigned
                    wiki_id2 = local_id[url_2]
                except (KeyError, IndexError):
                    wiki_id2 = max_wiki_id
                    local_id[url_2] = wiki_id2
                    max_wiki_id +=1
        # One edge per page link, as "source_id target_id".
        output_file_write.write('%d %d\n' %(wiki_id1,wiki_id2))
        print count
        count += 1
output_file_write.close()
# Persist the synthetic-id assignments so later runs can map them back to URLs.
pickle.dump(local_id,open('../../datasets/dbpedia/local_id_to_url_full_mapping_based.p','wb'))
|
InterestingLab/elasticmanager | elasticmanager/settings.py | Python | mit | 4,127 | 0.000727 | """
Django settings for elasticmanager project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deploy.
SECRET_KEY = '$yx)lw7*b3lx7-ozap5d@&%6=bndkmrjn8z)4z$g$x+kj)2xhj'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is hard-coded on; confirm deployments override this.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'kombu.transport.django',
'djcelery',
'cluster',
'indices',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'elasticmanager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'elasticmanager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'propagate': True,
'level': 'DEBUG',
}
},
}
# --- End of Django Config ---
# Celery Related Settings
# Using Django ORM API as celery broker has such limitations:
# http://docs.celeryproject.org/en/latest/getting-started/brokers/django.html
BROKER_URL = 'django://'
# set socket_timeout to prevent worker from stopping working,
# which is fetch task messages from broker, when using redis as broker
# see https://github.com/celery/celery/issues/1221
BROKER_TRANSPORT_OPTIONS = {'socket_timeout': 30}
# NOTE(review): the result backend points at a hard-coded internal IP;
# consider moving this to an environment variable per deployment.
CELERY_RESULT_BACKEND = 'redis://172.16.187.65:9103/0'
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
# prefetch only 1 task message at a time multiplied by the number of concurrent processes.
# This is good for | long running tasks
# see http://docs.celeryproject.org/en/latest/userguide/optimizing.html#prefetch-limits
CELERYD_PREFETCH_MULTIPLIER = 1
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
# --- End of Celery Config --- |
# django-celery config
import djcelery
djcelery.setup_loader()
# --- End of django-celery Config ---
|
azafred/skeletor | tests/test_advanced.py | Python | bsd-2-clause | 267 | 0 | # -*- co | ding: utf-8 -*-
from .context import sample
import unittest
class AdvancedTestSuite(unittest.TestCase):
    """Advanced test cases."""
    def test_thoughts(self):
        # sample.main() is expected to be side-effect only and return None.
        self.assertIsNone(sample.main())
if __name__ == '__main__':
unittest.main()
|
jkozerski/meteo | meteo_lcd/update_meteo.py | Python | apache-2.0 | 20,586 | 0.018313 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Author:
# Janusz Kozerski (https://github.com/jkozerski)
# install:
# pip install python-dateutil
#
# for diagrams install:
# sudo python -m pip --no-cache-dir install -U matplotlib
#
# for database install:
# sqlite3 (apt-get install sqlite3)
#
#
# or for other diagrams (this seems to be too "heavy" for raspberry):
# pip install plotly
import re #regular expression
from shutil import move
from os import remove
from math import sqrt, floor
import datetime # datetime and timedelta structures
import time
# Changing user
import os # os.getuid
import pwd # pwd.getpwuid
import grp # grp.getgrnam
# Mosquito (data passing/sharing)
import paho.mqtt.client as mqtt
# Sqlite3 database
import sqlite3
# Needed for drawing a plot
import dateutil.parser
import matplotlib
import matplotlib.pyplot as plt
#import numpy as np
from matplotlib.ticker import MultipleLocator
##############################################################################################################
### Needed defines & constants
# choose working dir
working_dir = "/var/www/html/"
data_dir = "/home/pi/meteo/"
www_meteo_path = working_dir + "meteo.html"
www_meteo_path_tmp = working_dir + "meteo.html_tmp"
log_file_path = working_dir + "meteo.log"
db_path = data_dir + "meteo.db"
# Default user
default_user = 'pi'
# Diagiam file names
temp_out_diagram_file = working_dir + "temp_out.png"
humid_out_diagram_file = working_dir + "humid_out.png"
dew_point_out_diagram_file = working_dir + "dew_out.png"
pressure_diagram_file = working_dir + "pressure.png"
# We don't want to make update too often so we need to store last update time
# And compare it with current time - if current time is too small then do nothing
# Initialize it to some old time
last_update_time = datetime.datetime.fromtimestamp(1284286794)
last_log_time = datetime.datetime.fromtimestamp(1284286794)
last_plot_time = datetime.datetime.fromtimestamp(1284286794)
update_delay = datetime.timedelta(seconds = 60 * 1) # update delay in seconds -> 1 minute
log_delay = datetime.timedelta(seconds = 60 * 3) # update delay in seconds -> 3 minute
plot_delay = datetime.timedelta(seconds = 60 * 10) # update delay in seconds -> 1 minute
template_temp_out_begin = "<!-- TEMP_OUT -->"
template_temp_out_end = "<!-- /TEMP_OUT -->"
template_humid_out_begin = "<!-- HUMID_OUT -->"
template_humid_out_end = "<!-- /HUMID_OUT -->"
template_pressure_begin = "<!-- PRESS -->"
template_pressure_end = "<!-- /PRESS -->"
template_dew_point_out_begin = "<!-- DEW_OUT -->"
template_dew_point_out_end = "<!-- /DEW_OUT -->"
template_temp_in_begin = "<!-- TEMP_IN -->"
template_temp_in_end = "<!-- /TEMP_IN -->"
template_humid_in_begin = "<!-- HUMID_IN -->"
template_humid_in_end = "<!-- /HUMID_IN -->"
template_dew_point_in_begin = "<!-- DEW_IN -->"
template_dew_point_in_end = "<!-- /DEW_IN -->"
template_last_update_begin = "<!-- LAST_UPDATE -->"
template_last_update_end = "<!-- /LAST_UPDATE -->"
val_regexp = ".*"
template_temp_out = template_temp_out_begin + val_regexp + template_temp_out_end
template_humid_out = template_humid_out_begin + val_regexp + template_humid_out_end
template_pressure = template_pressure_begin + val_regexp + template_pressure_end
template_dew_point_out = template_dew_point_out_begin + val_regexp + template_dew_point_out_end
template_temp_in = template_temp_in_begin + val_regexp + template_temp_in_end
template_humid_in = template_humid_in_begin + val_regexp + template_humid_in_end
template_dew_point_in = template_dew_point_in_begin + val_regexp + template_dew_point_in_end
template_last_update = template_last_update_begin + val_regexp + template_last_update_end
##############################################################################################################
### Change running user to default user ('pi')
# Needed when script is running as root on system startup from /etc/init.d/
def change_user():
user = pwd.getpwuid( os.getuid() ).pw_name
if user == default_user :
print "User OK - '" + user + "'."
return
else:
print "Bad user '" + user + "', changing to '" + default_user + "'."
try:
# Remove group privileges
os.setgroups([])
# Try setting the new | uid/gid
| os.setgid(grp.getgrnam(default_user).gr_gid)
os.setuid(pwd.getpwnam(default_user).pw_uid)
except Exception as e:
print("Error while changing user." + str(e))
print "User changed from '" + user + "' to '" + default_user + "'."
##############################################################################################################
### Helpers functions
# Returns outside values: temp_out, humid_out, dew_point_out
def get_meteo_data_out():
    # Placeholder stub: fixed (temp_out, humid_out, dew_point_out) values;
    # the trailing comma still yields a 3-tuple.
    # TODO confirm: real sensor readout presumably replaces these constants.
    return 20.4, 55, 12,
# Returns inside values: temp_in, humid_in
def get_meteo_data_in():
    # Placeholder stub: fixed (temp_in, humid_in) tuple.
    return 20.4, 40
# Returns air pressure
def get_meteo_pressure():
    # Placeholder stub: fixed pressure reading (presumably hPa — confirm).
    return 999.3
# Calculates dew point
# This should give a correct result for temperature in ranges -30 < T < 70 *C, and humidity 0-100%
def get_dew_point(temp, humid):
    """Approximate the dew point, rounded to the nearest whole degree.

    Uses the approximation Tdp = (H/100)^(1/8) * (112 + 0.9*T) + 0.1*T - 112,
    which holds for roughly -30 < T < 70 *C and 0-100% humidity.  The eighth
    root is computed as three nested square roots.
    """
    eighth_root = sqrt(sqrt(sqrt(float(humid) / 100.0)))
    dew = eighth_root * (112.0 + 0.9 * float(temp)) + 0.1 * float(temp) - 112.0
    return floor(dew + 0.5)
# Log data to file
# We can use this data later to draw a plot
def log_to_file(temp_in, humid_in, dew_in, temp_out, humid_out, dew_out, pressure):
    """Append one semicolon-separated measurement record to the log file.

    Record format: ISO-8601 timestamp (seconds precision) followed by the
    seven measurement values, terminated by a newline.
    """
    lf = open(log_file_path, "a");
    t = datetime.datetime.now()
    # Drop microseconds so the timestamp stays compact and easy to parse.
    t = t.replace(microsecond=0)
    new_line = t.isoformat() + ";" + str(temp_in) + ";" + str(humid_in) + ";" + str(dew_in) + ";" + str(temp_out) + ";" + str(humid_out) + ";" + str(dew_out) + ";" + str(pressure) + "\n"
    lf.write(new_line)
    lf.close()
# Converts a string back the datetime structure
def getDateTimeFromISO8601String(s):
    """Parse an ISO-8601 timestamp string back into a datetime object."""
    return dateutil.parser.parse(s)
##############################################################################################################
### Database
# Creating table in database
def create_db ():
    """Create the sqlite database file and the 'log' table if missing.

    Columns: unix timestamp plus outdoor temp/humid/dew_point, pressure,
    and the indoor temp/humid/dew_point counterparts.
    """
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    sql = "CREATE TABLE IF NOT EXISTS log (\n\
    id INTEGER PRIMARY KEY ASC,\n\
    time INT NOT NULL,\n\
    temp REAL,\n\
    humid INT,\n\
    dew_point INT,\n\
    pressure REAL,\n\
    temp_in REAL,\n\
    humid_in INT,\n\
    dew_point_in INT)"
    c.execute(sql)
    conn.commit()
    conn.close()
def log_into_db (date_time, temp, humid, dew_point, pressure, temp_in, humid_in, dew_point_in):
    """Insert one measurement row into the 'log' table.

    *date_time* is stored as a unix timestamp (whole seconds).  Errors are
    reported but swallowed so a failed insert cannot kill the logger loop.
    """
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    try:
        int_time = int (time.mktime(date_time.timetuple()))
        c.execute("INSERT INTO log (time, temp, humid, dew_point, pressure, temp_in, humid_in, dew_point_in) \
                   VALUES (?, ?, ?, ?, ?, ?, ?, ?)", (int_time, temp, humid, dew_point, pressure, temp_in, humid_in, dew_point_in))
        conn.commit()
    except Exception as e:
        print("Error while insert log to database: " + str(e))
    finally:
        # Robustness: always release the connection, whatever happened above.
        conn.close()
# Get values from last days and hours
def get_val_last_db(days, hours):
    """Return (time, temp, humid, dew_point, pressure) rows from the last
    *days* days and *hours* hours.

    Returns None for out-of-range arguments and an empty list if the query
    fails.
    """
    if days < 0 or days > 31:
        return;
    if hours < 0 or hours > 23:
        return;
    current_time = datetime.datetime.now()
    begin_time = current_time - datetime.timedelta(days=days, hours=hours)
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    # Let sqlite convert the datetime bounds into unix timestamps.
    c.execute("SELECT strftime('%s', (?))", (begin_time, ))
    int_time_min = (c.fetchone())[0]
    c.execute("SELECT strftime('%s', (?))", (current_time, ))
    int_time_max = (c.fetchone())[0]
    rows = []  # Bug fix: defined up front so a failed query cannot leave it
               # unbound and turn the return below into a NameError.
    try:
        c.execute("SELECT time, temp, humid, dew_point, pressure FROM log WHERE time >= ? AND time < ?", (int_time_min, int_time_max))
        rows = c.fetchall()
    except Exception as e:
        print("Error while get_val_last from db: " + str(e))
    finally:
        conn.close()
    return rows
# Get last updatate time from db
def get_last_update_time_from_db():
conn = sqlite3.connect(db_path)
c = conn.cursor()
try:
c.execute("SELECT time FROM log ORDER BY time DESC LIMIT 1")
row = c.fet |
TallonRain/horsefaxbot | horsefax/telegram/events/threaded.py | Python | mit | 2,365 | 0.000846 | import threading
import queue
from . import BaseEventHandler, BaseEventQueue
class ThreadedEventHandler(BaseEventHandler):
    """
    A threaded implementation of :class:`.BaseEventHandler`.

    Registrations are guarded by an RLock so handlers can be added and
    removed from any thread, including from inside a running handler.
    """
    def __init__(self):
        # event -> {handle: handler callable}
        self._handlers = {}
        # handle -> event (for O(1) unregistration)
        self._handle_map = {}
        # Monotonically increasing source of unique handles.
        self._counter = 0
        self._handler_lock = threading.RLock()
    def register_handler(self, event, handler):
        """Register *handler* for *event*; returns an opaque integer handle."""
        with self._handler_lock:
            self._counter += 1
            self._handlers.setdefault(event, {})[self._counter] = handler
            self._handle_map[self._counter] = event
            return self._counter
    def unregister_handler(self, handle):
        """Remove a previously registered handler; unknown handles are ignored."""
        with self._handler_lock:
            if handle not in self._handle_map:
                return
            del self._handlers[self._handle_map[handle]][handle]
            del self._handle_map[handle]
    def wait_for_event(self, event, timeout=10):
        """Block until *event* fires and return its payload (TimeoutError on timeout)."""
        return _BlockingEventWait(self, event).wait(timeout=timeout)
    def queue_events(self, event):
        """Return a queue-like object that buffers every occurrence of *event*."""
        return _QueuedEventWait(self, event)
    def broadcast_event(self, event, *args):
        """Invoke every handler registered for *event* with *args*."""
        # Iterate over a snapshot so handlers may (un)register during dispatch.
        for handler in list(self._handlers.get(event, {}).values()):
            handler(*args)
class _BlockingEventWait(object):
def __init__(self, events, event):
self.block = threading.Event()
self.event_handler = events
self.result = None
self.handle = self.event_handler.register_handler(event, self.handle_result)
def handle_result(self, *args):
self.result, = args
self.event_handler.unregister_handler(self.handle)
self.block.set()
def wait(self, timeout=10):
if not self.block.wait(timeout=timeout):
raise TimeoutError()
return self.result
class _QueuedEventWait(BaseEventQueue):
    """Buffers every occurrence of an event in a thread-safe queue."""
    def __init__(self, events, event):
        self.queue = queue.Queue()
        self.event_handler = events
        self.handle = self.event_handler.register_handler(event, self._handle_event)
    def _handle_event(self, arg):
        # Producer side: invoked from the event handler's dispatching thread.
        self.queue.put(arg)
    def close(self):
        # Stop delivering further events into this queue.
        self.event_handler.unregister_handler(self.handle)
    def get(self, timeout=10):
        # Consumer side: pop the next buffered event.
        try:
            return self.queue.get(timeout=timeout)
        except queue.Empty:
            raise TimeoutError()
    def __iter__(self):
        # NOTE(review): this generator yields exactly one event per __iter__
        # call, so `for x in q` runs a single iteration — confirm intended.
        yield self.get()
|
wilhelm-murdoch/blink | setup.py | Python | mit | 479 | 0 | #!/usr/bin/env python
# -*- coding: utf | -8 -*-
from setuptools import setup, find_packages
from version import __version__
setup(
name='blink',
version=__version__,
description='Create jQuery-like eve | nts for your Python app.',
author='Wilhelm Murdoch',
author_email='wilhelm.murdoch@gmail.com',
url='http://www.thedrunkenepic.com/',
packages=find_packages(exclude=['tests']),
setup_requires=[
'nose==1.3.0',
'yanc==0.2.3'
]
)
|
HamutalCohen3/anyway | clusters_calculator.py | Python | bsd-3-clause | 1,569 | 0.003824 | from models import Marker
from static.pymapcluster import calculate_clusters
import time
import logging
import concurrent.futures
import multiprocessing
def retrieve_clusters(**kwargs):
    """Compute marker clusters for the viewport given by ne/sw lat-lng kwargs.

    The bounding box is split into one strip per CPU core; each strip's
    markers are fetched and clustered on a worker thread, and the per-strip
    cluster lists are concatenated.
    """
    marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])
    result_futures = []
    logging.info('number of cores: ' + str(multiprocessing.cpu_count()))
    with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
        for marker_box in marker_boxes:
            # Overwrite the viewport coordinates with this strip's box
            # before issuing the bounding-box query.
            kwargs.update(marker_box)
            markers_in_box = Marker.bounding_box_query(**kwargs).all()
            result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))
    completed_futures = concurrent.futures.wait(result_futures)
    result = []
    for future in completed_futures.done:
        result.extend(future.result())
    return result
def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):
    """Split the lat/lng bounding box into cpu_count() horizontal strips.

    The box is cut along the latitude axis only; every strip keeps the full
    longitude range.  Strip i spans [sw_lat + i*h, sw_lat + (i+1)*h] where
    h = (ne_lat - sw_lat) / n, so the strips tile the viewport exactly.

    Returns a list of dicts with ne_lat/ne_lng/sw_lat/sw_lng keys, ready to
    be merged into a bounding-box query's kwargs.
    """
    strip_count = multiprocessing.cpu_count()
    strip_height = (ne_lat - sw_lat) / strip_count
    boxes = []
    for i in range(strip_count):
        # Bug fix (per the TODO previously here): the northern edge must grow
        # from sw_lat, not ne_lat — the old formula produced overlapping
        # strips that extended past the viewport's northern boundary.
        boxes.append({'ne_lat': sw_lat + (i + 1) * strip_height, 'ne_lng': ne_lng,
                      'sw_lat': sw_lat + i * strip_height, 'sw_lng': sw_lng})
    return boxes
|
huangenyan/Lattish | project/mahjong/ai/base.py | Python | mit | 187 | 0 | # -*- coding: utf-8 -*-
class BaseAI(object):
    """Common base interface for mahjong AI implementations."""

    # Class-level defaults, overridden per instance; kept so code that
    # inspects the attributes on the class still sees them.
    player = None
    table = None

    def __init__(self, player):
        self.player = player

    def discard_tile(self):
        """Choose a tile to discard; the base implementation does nothing."""
        pass
|
lsst-sims/sims_phosim_pythoncontrol | fullFocalplane.py | Python | gpl-3.0 | 11,426 | 0.009102 | #!/usr/bin/python
"""Perform preprocessing and generate raytrace exec scripts for one focal plane.
For documentation using the python_control for ImSim/PhoSim version <= v.3.0.x,
see README.v3.0.x.txt.
For documentation using the python_control for ImSim/PhoSim version == v.3.2.x,
see README.txt.
The behavior of this script differs depending on the version of ImSim/PhoSim.
For versions <= v3.0.x, it functions like the original fullFocalplane.py and
calls AllChipsScriptGenerator.makeScripts() to generate a script and some tarballs
that can in turn be executed to run the preprocessing step (which in turn calls
AllChipsScriptGenerator) to generate shells scripts and tarballs for performing
the raytrace stage. See README.v3.0.x.txt for more info.
The behavior for ImSim/PhoSim version == 3.2.x is to run the preprocessing step
directly through the class PhosimManager.PhosimPrepreprocessor (which in turn
calls phosim.py in the phosin.git repository). After the preprocessing is
complete, PhosimPreprocessor generates shell scripts for the raytrace phase.
A few notes on options:
--skip_atmoscreens: Use this to optionally skip the step to generate atmosphere
screens during preprocessing and instead perform this
operation at the start of the raytrace phase. This is
useful in distributed environments where the cost of
transferring the atmosphere screens to the compute node
is higher than recalculating them.
--logtostderr: (only v3.2.x and higher) By default, log output from python_controls
is done via the python logging module, and directed to either
log_dir in the imsim_config_file or /tmp/fullFocalplane.log
if log_dir is not specified. This option overrides this behavior
and prints logging information to stdout. Note: output from
phosim.py and the phosim binaries are still printed to stdout.
TODO(gardnerj): Add stdout log redirect
TODO(gardnerj): Support sensor_ids argument for phosim.py.
TODO(gardnerj): Support not running e2adc step.
"""
from __future__ import with_statement
import ConfigParser
from distutils import version
import logging
from optparse import OptionParser # Can't use argparse yet, since we must work in 2.5
import os
import sys
from AllChipsScriptGenerator import AllChipsScriptGenerator
import PhosimManager
import PhosimUtil
import PhosimVerifier
import ScriptWriter
__author__ = 'Jeff Gardner (gardnerj@phys.washington.edu)'
logger = logging.getLogger(__name__)
def DoPreprocOldVersion(trimfile, policy, extra_commands, scheduler, sensor_id):
  """Do preprocessing for v3.1.0 and earlier.
  Args:
    trimfile: Full path to trim metadata file.
    policy: ConfigParser object from python_controls config file.
    extra_commands: Full path to extra commands or 'extraid' file.
    scheduler: Name of scheduler (currently, just 'csh' is supported).
    sensor_id: If not '', run just this single sensor ID.
  Returns:
    0 (success), or 1 when the 'exacycle' scheduler is requested.
  """
  with PhosimUtil.WithTimer() as t:
    # Determine the pre-processing scheduler so that we know which class to use
    if scheduler == 'csh':
      scriptGenerator = AllChipsScriptGenerator(trimfile, policy, extra_commands)
      scriptGenerator.makeScripts(sensor_id)
    elif scheduler == 'pbs':
      # NOTE(review): AllChipsScriptGenerator_Pbs does not appear in this
      # module's imports — confirm this branch actually resolves at runtime.
      scriptGenerator = AllChipsScriptGenerator_Pbs(trimfile, policy, extra_commands)
      scriptGenerator.makeScripts(sensor_id)
    elif scheduler == 'exacycle':
      print 'Exacycle funtionality not added yet.'
      return 1
    else:
      print 'Scheduler "%s" unknown. Use -h or --help for help.' % scheduler
    t.LogWall('makeScripts')
  return 0
def DoPreproc(trimfile, imsim_config_file, extra_commands, scheduler,
              skip_atmoscreens=False, keep_scratch_dirs=False):
  """Do preprocessing for v3.2.0 and later.
  Args:
    trimfile: Full path to trim metadata file.
    imsim_config_file: Full path to the python_controls config file.
    extra_commands: Full path to extra commands or 'extraid' file.
    scheduler: Name of scheduler (currently, just 'csh' is supported).
    skip_atmoscreens: Generate atmosphere screens in raytrace stage instead
                      of preprocessing stage.
    keep_scratch_dirs: Do not delete the working directories at the end of
                       execution.
  Returns:
    0 upon success, 1 upon failure.
  """
  if scheduler == 'csh':
    preprocessor = PhosimManager.Preprocessor(imsim_config_file,
                                              trimfile, extra_commands)
  elif scheduler == 'pbs':
    # Construct PhosimPreprocessor with PBS-specific ScriptWriter
    preprocessor = PhosimManager.Preprocessor(
      imsim_config_file, trimfile, extra_commands,
      script_writer_class=ScriptWriter.PbsRaytraceScriptWriter)
    # Read in PBS-specific config
    policy = ConfigParser.RawConfigParser()
    policy.read(imsim_config_file)
    preprocessor.script_writer.ParsePbsConfig(policy)
  else:
    logger.critical('Unknown scheduler: %s. Use -h or --help for help',
                    scheduler)
    return 1
  preprocessor.InitExecEnvironment()
  with PhosimUtil.WithTimer() as t:
    if not preprocessor.DoPreprocessing(skip_atmoscreens=skip_atmoscreens):
      logger.critical('DoPreprocessing() failed.')
      return 1
    t.LogWall('DoPreprocessing')
  exec_manifest_fn = 'execmanifest_raytrace_%s.txt' % preprocessor.focalplane.observationID
  files_to_stage = preprocessor.ArchiveRaytraceInputByExt(exec_archive_name=exec_manifest_fn)
  if not files_to_stage:
    logger.critical('Output archive step failed.')
    return 1
  with PhosimUtil.WithTimer() as t:
    preprocessor.StageOutput(files_to_stage)
  t.LogWall('StageOutput')
  if not keep_scratch_dirs:
    preprocessor.Cleanup()
  verifier = PhosimVerifier.PreprocVerifier(imsim_config_file, trimfile,
                                            extra_commands)
  missing_files = verifier.VerifySharedOutput()
  if missing_files:
    logger.critical('Verification failed with the following files missing:')
    for fn in missing_files:
      logger.critical('  %s', fn)
    sys.stderr.write('Verification failed with the following files missing:\n')
    for fn in missing_files:
      # Bug fix: file.write() takes a single string; the old call passed the
      # filename as a second argument and raised a TypeError.
      sys.stderr.write('  %s\n' % fn)
  else:
    logger.info('Verification completed successfully.')
  return 0
def ConfigureLogging(trimfile, policy, log_to_stdout, imsim_config_file,
                     extra_commands=None):
  """Configures logger.
  If log_to_stdout, the logger will write to stdout.  Otherwise, it will
  write to:
     'log_dir' in the config file, if present
     /tmp/fullFocalplane.log if 'log_dir' is not present.
  Stdout from phosim.py and PhoSim binaries always goes to stdout.
  Args:
    trimfile: Full path to trim metadata file.
    policy: ConfigParser object from the python_controls config file.
    log_to_stdout: If True, log to stdout instead of a file.
    imsim_config_file: Full path to the config file (recorded in the header).
    extra_commands: Optional full path to extra commands / 'extraid' file.
  """
  if log_to_stdout:
    log_fn = None
  else:
    if policy.has_option('general', 'log_dir'):
      # Log to file in log_dir
      # Bug fix: pass this function's 'extra_commands' parameter; the old
      # code referenced the undefined module-level name 'options'.
      obsid, filter_num = PhosimManager.ObservationIdFromTrimfile(
          trimfile, extra_commands=extra_commands)
      log_dir = os.path.join(policy.get('general', 'log_dir'), obsid)
      log_fn = os.path.join(log_dir, 'fullFocalplane_%s.log' % obsid)
    else:
      log_fn = '/tmp/fullFocalplane.log'
  PhosimUtil.ConfigureLogging(policy.getint('general', 'debug_level'),
                              logfile_fullpath=log_fn)
  params_str = 'trimfile=%s\nconfig_file=%s\n' % (trimfile, imsim_config_file)
  if extra_commands:
    params_str += 'extra_commands=%s\n' % extra_commands
  PhosimUtil.WriteLogHeader(__file__, params_str=params_str)
def main(trimfile, imsim_config_file, extra_commands, skip_atmoscreens,
keep_scratch_dirs, sensor_ids, log_to_stdout=False):
"""
Run the fullFocalplanePbs.py script, populating it with the
correct user and cluster job submission information from an LSST
policy file.
"""
policy = ConfigParser.RawConfigParser()
policy.read(imsim_config_file)
if policy.has_option('general', 'phosim_version'):
phosim_version = policy.get('general', 'phosim_versio |
pneerincx/easybuild-framework | easybuild/toolchains/linalg/__init__.py | Python | gpl-2.0 | 1,340 | 0.002239 | ##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be | /in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License | as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declaration of toolchains.linalg namespace.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
from pkgutil import extend_path
# Declare this as a namespace package: other distributions may also ship
# modules under this package, so extend __path__ with every matching
# directory found on sys.path.
# we're not the only ones in this namespace
__path__ = extend_path(__path__, __name__) #@ReservedAssignment
|
Jumpers/MysoftAutoTest | Step1-PythonBasic/Practices/wangr/31-35/ex31.py | Python | apache-2.0 | 1,033 | 0.019361 | #-*-coding:utf-8-*-
#coding=utf-8
print "you enter a dark room with two doors. Do you go through door #1 or door #2"
door = raw_input(">>>")
if door == "1":
print "there's a glant bear here eating a cheese cake. What do you do?"
print "1,Take the cake"
print "2,Scream at the bear"
bear = raw_input(">>>")
if bear == "1":
print "The bear eats your face off.Good job!"
elif bear == "2":
print "The bear | eats your legs off. Good job!"
else:
print "Well,doing %s is probably better.Bear runs away." % bear
elif door =="2":
print "You stare into the endless abyss at Cthulhu's retina."
print "1, Blueberries"
print "2, Yellow jacket clothspins"
print "3, Understanding revolvers yelling me | lodies."
if insanity == "1" or insanity == "2":
print "you body survives powered by a mind of jello, Good job!"
else:
print "The insanity rots your eyes into a pool of muck. Good job!"
else:
print "you stumble around and fail on a knife and die. Good job!"
|
TalhaAsmal/Taming-Big-Data-Pyspark-Udemy | ratings-counter.py | Python | mit | 437 | 0 | from pyspark import SparkConf | , SparkContext
import collections
# Configure a local Spark job that histograms the MovieLens rating values.
spark_conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
spark_context = SparkContext(conf=spark_conf)

# The third whitespace-separated field of each u.data line is the rating.
raw_lines = spark_context.textFile("file:///SparkCourse/ml-100k/u.data")
rating_field = raw_lines.map(lambda line: line.split()[2])
rating_counts = rating_field.countByValue()

# Print the histogram in ascending rating order.
ordered_counts = collections.OrderedDict(sorted(rating_counts.items()))
for rating, count in ordered_counts.items():
    print("%s %i" % (rating, count))
|
BrythonServer/ggame | examples/assetcolor.py | Python | mit | 90 | 0 | "" | "
Example of using Color cl | ass.
"""
from ggame import Color
# Fully opaque pure red: RGB hex 0xFF0000 with alpha 1.0.
RED = Color(0xFF0000, 1.0)
|
claymation/pystogram | pystogram/histogram.py | Python | bsd-2-clause | 3,484 | 0.004879 | import datetime
from pystogram.tree import PrefixTree
# Bucket-size constants, expressed in seconds.
SECOND = 1
MINUTE = SECOND * 60
HOUR = MINUTE * 60
DAY = HOUR * 24
MONTH = DAY * 30
YEAR = DAY * 365

# The multiplier applied when testing timestamp interval to guess a resolution.
# A value of 2.0 means the timestamp interval must be greater than 24 months in
# order to use a resolution of years
RESOLUTION_SCALE = 2.0


# FIXME: Where to put this?
def prefix(timestamp, resolution):
    """
    Compute and return a key prefix (a time-tuple slice) for this timestamp.

    The year field is always kept; one extra field is kept for every unit
    finer than the requested resolution.
    """
    finer_units = (YEAR, MONTH, DAY, HOUR, MINUTE)
    length = 1 + sum(1 for unit in finer_units if resolution < unit)
    return timestamp.timetuple()[:length]
# FIXME: Missing domain concepts: timestamp (essentially a datetime), key (essentially a time.struct_time tuple)
class Histogram(object):
    """
    An informal histogram useful for counting time-series data, dividing samples
    into equally-sized intervals (buckets), and computing aggregate counts of the
    samples within each bucket.
    """

    def __init__(self):
        """
        Construct a Histogram instance.
        """
        # Samples are stored as counts in a prefix tree keyed by time-tuples.
        self.tree = PrefixTree()

    def count(self, timestamp):
        """
        Increment the count for this timestamp.
        """
        self.tree.incr(timestamp)

    @property
    def first_sample(self):
        """The smallest key in the tree, as a datetime (earliest sample)."""
        # FIXME: Subclass PrefixTree into DateTimePrefixTree so we don't have to do this conversion here?
        return datetime.datetime(*self.tree.least())

    @property
    def last_sample(self):
        """The greatest key in the tree, as a datetime (latest sample)."""
        # FIXME: Subclass PrefixTree into DateTimePrefixTree so we don't have to do this conversion here?
        return datetime.datetime(*self.tree.greatest())

    @property
    def sample_interval(self):
        """Span between first and last sample, in seconds (float)."""
        return (self.last_sample - self.first_sample).total_seconds()

    @property
    def sample_resolution(self):
        """
        Compute a reasonable bucket resolution based on the sample interval.
        """
        # FIXME: Improve?
        # Pick the coarsest unit such that the interval covers more than
        # RESOLUTION_SCALE buckets of that unit.
        interval = self.sample_interval
        if interval > YEAR * RESOLUTION_SCALE:
            return YEAR
        elif interval > MONTH * RESOLUTION_SCALE:
            return MONTH
        elif interval > DAY * RESOLUTION_SCALE:
            return DAY
        elif interval > HOUR * RESOLUTION_SCALE:
            return HOUR
        elif interval > MINUTE * RESOLUTION_SCALE:
            return MINUTE
        else:
            return SECOND

    def buckets(self, resolution=None):
        """
        Generate and yield buckets sized according to the passed or guessed resolution.

        Walks from the first to the last sample in steps of one bucket width,
        yielding a Bucket for every interval (including empty ones).
        """
        # Cache these properties locally
        first_sample = self.first_sample
        last_sample = self.last_sample

        # Compute the bucket resolution and interval (width)
        if resolution is None: resolution = self.sample_resolution
        bucket_interval = datetime.timedelta(seconds=resolution)

        timestamp = first_sample
        while timestamp <= last_sample:
            # insert() returns the (possibly new) node for this key prefix,
            # so empty intervals still yield a bucket (count 0, presumably
            # -- confirm against PrefixTree.sum()).
            node = self.tree.insert(prefix(timestamp, resolution))
            bucket = Bucket(timestamp, node)
            yield bucket
            timestamp += bucket_interval
class Bucket(object):
    """A single histogram bucket: the interval start time plus its tree node."""

    def __init__(self, start, node):
        # Cache the aggregate count of the subtree rooted at ``node``.
        self.count = node.sum()
        self.start = start
        self.node = node
|
Razi91/BiblioTeKa | manage/management/commands/randoms.py | Python | gpl-2.0 | 5,194 | 0.024713 | __author__ = 'jkonieczny'
from books.models import *
import random
import datetime
from django.core.management.base import BaseCommand, CommandError
_names1 = [
"JAKUB",
"KACPER",
"SZYMON",
"MATEUSZ",
"FILIP",
"MICHAŁ",
"BARTOSZ",
"WIKTOR",
"PIOTR",
"DAWID",
"ADAM",
"MACIEJ",
"JAN",
"IGOR",
"MIKOŁAJ",
"PATRYK",
"PAWEŁ",
"DOMINIK",
"OSKAR",
"ANTONI",
"WOJCIECH",
"KAMIL",
"ALEKSANDER",
"KRZYSZTOF",
"OLIWIER",
"MARCEL",
"KAROL",
"FRANCISZEK",
"TOMASZ",
"HUBERT",
"BARTŁOMIEJ",
"ADRIAN",
"ALAN",
"SEBASTIAN",
"MIŁOSZ",
"KRYSTIAN",
"ŁUKASZ",
"NIKODEM",
"GABRIEL",
"MARCIN",
"STANISŁAW",
"DAMIAN",
"KONRAD",
"DANIEL",
"FABIAN",
"BŁAEJ",
"RAFAŁ",
"TYMOTEUSZ",
"KSAWERY"]
_names2 = [u"NOWAK",
u"KOWALSKI",
u"WIŚNIEWSKI",
u"WÓJCIK",
u"KOWALCZYK",
u"KAMIŃSKI",
u"LEWANDOWSKI",
u"ZIELIŃSKI",
u"WOŹNIAK",
u"SZYMAŃSKI",
u"DĄBROWSKI",
u"KOZŁOWSKI",
u"JANKOWSKI",
u"MAZUR",
u"KWIATKOWSKI",
u"WOJCIECHOWSKI",
u"KRAWCZYK",
u"KACZMAREK",
u"PIOTROWSKI",
u"GRABOWSKI",
u"ZAJĄC",
u"PAWŁOWSKI",
u"KRÓL",
u"MICHALSKI",
u"WRÓBEL",
u"WIECZOREK",
u"JABŁOŃSKI",
u"NOWAKOWSKI",
u"MAJEWSKI",
u"OLSZEWSKI",
u"STĘPIEŃ",
u"DUDEK",
u"JAWORSKI",
u"MALINOWSKI",
u"ADAMCZYK",
u"PAWLAK",
u"GÓRSKI",
u"NOWICKI",
u"SIKORA",
u"WITKOWSKI",
u"WALCZAK",
u"RUTKOWSKI",
u"BARAN",
u"MICHALAK",
u"SZEWCZYK",
u"OSTROWSKI",
u"TOMASZEWSKI",
u"ZALEWSKI",
u"WRÓBLEWSKI",
u"PIETRZAK"]
_titles = ['łyżka',
'ręka',
'ucho',
'kolczyk',
'bluzka',
'koc',
'buty',
'kot',
'piasek',
'lawa',
'rekin',
'szuflada',
'figurka',
'biurko',
'mata',
'woda',
'kanapka',
'wózek',
'lalka',
'gitara',
'bębny',
'pianino',
'talerz',
'puzzle',
'tygrys',
'miś',
'linoleum',
'naklejka',
'folder',
'plik',
'lista',
'zegar',
'lis',
'motyl',
'klawiatura',
'kamera',
'aparat',
'telefon',
'kabaretki',
'pończochy',
'sukienka',
'żółw',
'ryba',
'ość',
'lód',
'schabowe',
'sałatka',
'samochód']
_genres = ['powieść',
'nowela',
'opowiadanie',
'epopeja',
'baśń',
'mit',
'legenda',
'pamiętnik',
'przypowieść ',
'oda',
'hymn',
'pieśń',
'tren',
'elegi',
'fraszka',
'epigramat',
'sonet ',
'dramat właściwy',
'tragedia',
'komedia',
'farsa',
'tragifarsa',
'opera']
_pub = ['GREG', 'PWN', 'ISKRA', u'Prószyński i S-ka', 'Znak']
class Command(BaseCommand):
    """Management command that fills the database with random test data:
    authors, genres, titles, publishers, pricings, editions and book entities.
    """

    # BUG FIX: the previous help text ("Closes the specified poll for voting")
    # was leftover Django-tutorial boilerplate and did not describe this command.
    help = 'Populate the database with randomly generated library test data'

    def add_arguments(self, parser):
        # No extra command-line arguments.
        pass

    def handle(self, *args, **options):
        # 30 authors with random first + last names from the sample pool.
        authors = []
        for _ in range(30):
            author = Author()
            author.name = '%s %s' % (random.choice(_names1),
                                     random.choice(_names1))
            author.born = datetime.date(1980, 1, 1)
            author.save()
            authors.append(author)

        # One Genre row per sample genre name.
        genres = []
        for genre_name in _genres:
            genre = Genre()
            genre.name = genre_name
            genre.save()
            genres.append(genre)

        # 150 titles with random release dates and 1+ random authors/genres.
        titles = []
        for _ in range(150):
            title = BookTitle()
            # randint keeps the date valid; the original arithmetic could
            # never produce months 11-12 or days 26-28.
            title.release = datetime.date(random.randint(1900, 2014),
                                          random.randint(1, 12),
                                          random.randint(1, 28))
            title.title = random.choice(_titles)
            # Save before touching M2M relations.
            title.save()
            for _ in range(1 + int(max(0, random.normalvariate(0, 1)))):
                title.author.add(random.choice(authors))
            for _ in range(1 + int(max(0, random.normalvariate(0, 1)))):
                title.genre.add(random.choice(genres))
            titles.append(title)
            title.save()

        publishers = []
        for publisher_name in _pub:
            publisher = Publisher()
            publisher.name = publisher_name
            publisher.save()
            publishers.append(publisher)

        # Four pricing schemes with increasing rates.
        pricings = []
        for i in range(1, 5):
            pricing = Pricing()
            pricing.name = "{0}".format(i)
            pricing.added = (datetime.datetime.now()
                             - datetime.timedelta(days=random.random() * 365 * 10))
            pricing.initial = i
            pricing.per_week = i
            pricing.save()
            pricings.append(pricing)

        # Editions + physical book entities; the last 5 titles get none.
        for title in titles[:-5]:
            # BUG FIX: the original used min(0, ...) here, which made the
            # range empty most of the time; max(0, ...) mirrors the
            # author/genre loops above and yields at least one edition.
            for _ in range(1 + int(max(0, random.normalvariate(0, 1)))):
                edition = BookEdition()
                edition.publisher = random.choice(publishers)
                edition.isbn = ''
                edition.release = (datetime.datetime.now()
                                   - datetime.timedelta(days=random.random() * 365 * 10)).date()
                edition.title = title
                edition.pricing = random.choice(pricings)
                edition.save()
                for _ in range(random.randint(0, 19)):
                    entity = BookEntity()
                    entity.title = title
                    entity.book = edition
                    entity.uuid = uuid.uuid4()
                    # BUG FIX: max(10, min(0, x)) always evaluated to 10;
                    # clamp the normal sample into [0, 10] instead.
                    entity.quality = min(10, max(0, random.normalvariate(7, 5)))
                    entity.save()
|
sparkslabs/kamaelia_ | Sketches/RJL/Packages/Kamaelia/Community/RJL/Kamaelia/Util/ChunkNamer.py | Python | apache-2.0 | 3,315 | 0.006938 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the Li | cense.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL
"""\
=========================
Chunk Namer
=========================
A component that labels each message with a unique filename for that message.
e.g. "A" ... "B" ... --> ["chunk1", "A"] ... ["chunk2", "B"] ...
Example Usage |
-------------
Save each line entered to the console to a separate file:
pipeline(
ConsoleReader(),
ChunkNamer("test", ".txt"),
WholeFileWriter()
).run()
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdown
class ChunkNamer(component):
    """\
    ChunkNamer() -> new ChunkNamer component.

    Gives a filename to the chunk and sends it in the form [filename, contents],
    e.g. to a WholeFileWriter component.

    Keyword arguments:
    -- basepath - the prefix to apply to the filename
    -- suffix - the suffix to apply to the filename
    """

    Inboxes = {
        "inbox" : "Chunks to be saved",
        "control" : "Shut me down"
    }

    Outboxes = {
        "outbox" : "List: [file name, file contents]",
        "signal" : "signal when I've shut down"
    }

    def __init__(self, basepath = "", suffix = ""):
        super(ChunkNamer, self).__init__()
        self.basepath = basepath
        self.suffix = suffix

    def main(self):
        """Generator main loop: label each received chunk with a sequential
        filename and forward [filename, chunk] pairs until shut down.
        """
        # FIX: removed unused local 'buffer' (it also shadowed the
        # Python 2 builtin of the same name).
        chunknumber = 0
        while 1:
            yield 1
            while self.dataReady("inbox"):
                chunknumber += 1
                data = self.recv("inbox")

                # create list of form [filename, contents]
                command = [self.basepath + "chunk" + str(chunknumber) + self.suffix, data]
                self.send(command, "outbox")

            # Shut down cleanly when asked to by upstream components.
            while self.dataReady("control"):
                msg = self.recv("control")
                if isinstance(msg, producerFinished) or isinstance(msg, shutdown):
                    self.send(producerFinished(self), "signal")
                    return

            # Sleep until new data arrives on an inbox.
            self.pause()
# Components exported by this module (Kamaelia introspection hook).
__kamaelia_components__ = ( ChunkNamer, )

if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import pipeline
    from Kamaelia.Community.RJL.Kamaelia.File.WholeFileWriter import WholeFileWriter
    from Kamaelia.Util.Console import ConsoleReader

    # Demo: save each console line to its own file ("testchunk1.txt", ...).
    pipeline(
        ConsoleReader(),
        ChunkNamer("test", ".txt"),
        WholeFileWriter()
    ).run()
|
krafczyk/spack | var/spack/repos/builtin/packages/mpir/package.py | Python | lgpl-2.1 | 2,446 | 0.000818 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and th | e LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for m | ore details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Mpir(Package):
    """Multiple Precision Integers and Rationals."""

    homepage = "https://github.com/wbhart/mpir"
    url = "https://github.com/wbhart/mpir/archive/mpir-2.7.0.tar.gz"
    git = "https://github.com/wbhart/mpir.git"

    version('develop', branch='master')
    # Hashes are the archive checksums used by Spack for verification.
    version('2.7.0', '985b5d57bd0e74c74125ee885b9c8f71')
    version('2.6.0', 'ec17d6a7e026114ceb734b2466aa0a91')

    # This setting allows mpir to act as a drop-in replacement for gmp
    variant('gmp_compat', default=False,
            description='Compile with GMP library compatibility')

    # Build dependencies
    depends_on('autoconf', type='build')

    # Other dependencies
    depends_on('yasm')

    def install(self, spec, prefix):
        """Configure, build, optionally test, and install MPIR."""
        # We definitely don't want to have MPIR build its
        # own version of YASM. This tries to install it
        # to a system directory.
        options = ['--prefix={0}'.format(prefix),
                   '--with-system-yasm']

        if '+gmp_compat' in spec:
            options.extend(['--enable-gmpcompat'])

        configure(*options)
        make()
        if self.run_tests:
            make('check')
        make('install')
|
pmuller/versions | versions/compat.py | Python | mit | 273 | 0.007326 | import sys
MAJOR = sys.version_info[0]
if MAJOR == 3:
| cmp = lambda a, b: (a > b) - (a < b) # pragma: no cover
basestring = str # pragma: no cover
else: # hopefully MAJOR == 2
cmp = cmp # pragma: no cover
basestring = basestring # pragma: no | cover
|
csyhhu/L-OBS | PyTorch/ImageNet/validate-AlexNet.py | Python | mit | 2,746 | 0.011289 | """
This code validates the performance of AlexNet after L-OBS prunning
"""
import torch
import torch.backends.cudnn as cudnn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from models.alexnet import AlexNet
from utils import validate, adjust_mean_var
import numpy as np
from datetime import datetime
use_cuda = torch.cuda.is_available()

# -------------------------------------------- User Config ------------------------------------
# Specify parameters path
pruned_weight_root = './AlexNet/pruned_weight_100k'
pretrain_model_path = './AlexNet/alexnet-owt-4df8aa71.pth'
n_validate_batch = 100 # Number of batches used for validation
validate_batch_size = 50 # Batch size of validation
# Per-layer compression setting; keys are AlexNet state-dict layer names.
# Values are presumably the compression ratio (percent) used to pick the
# pruned-weight directory ("CR_<value>") -- confirm against the pruning code.
prune_ratio = {
    'features.0': 80,
    'features.3': 35,
    'features.6': 35,
    'features.8': 35,
    'features.10': 35,
    'classifier.1': 10,
    'classifier.4': 10,
    'classifier.6': 35
}
# -------------------------------------------- User Config ------------------------------------

# Load the pretrained AlexNet, then overwrite each configured layer's
# parameters with the pruned versions stored on disk.
net = AlexNet()
net.load_state_dict(torch.load(pretrain_model_path))
param = net.state_dict()
total_nnz = 0
total_nelements = 0
for layer_name, CR in prune_ratio.items():
    if CR != 100:
        pruned_weight = np.load('%s/CR_%d/%s.weight.npy' %(pruned_weight_root, CR, layer_name))
        pruned_bias = np.load('%s/CR_%d/%s.bias.npy' %(pruned_weight_root, CR, layer_name))
        # Calculate sparsity
        total_nnz += np.count_nonzero(pruned_weight)
        total_nnz += np.count_nonzero(pruned_bias)
        total_nelements += pruned_weight.size
        total_nelements += pruned_bias.size
        param['%s.weight' %layer_name] = torch.FloatTensor(pruned_weight)
        param['%s.bias' %layer_name] = torch.FloatTensor(pruned_bias)
# Overall fraction of non-zero parameters across all replaced layers.
overall_CR = float(total_nnz) / float(total_nelements)
print ('Overall compression rate (nnz/total): %f' %overall_CR)
net.load_state_dict(param)
# Optional batch-norm statistics adjustment, currently disabled.
'''
print ('[%s] Begin adjust finish. Now saving parameters' %(datetime.now()))
adjust_mean_var(net, train_loader, None)
print ('[%s] Adjust finish. Now saving parameters' %(datetime.now()))
'''
# Load validation dataset
valdir = '/home/shangyu/imagenet-val'
print('==> Preparing data..')
# Standard ImageNet preprocessing: resize, center-crop, normalize.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size = validate_batch_size, shuffle=True)

if use_cuda:
    # Parallelize across all visible GPUs.
    net.cuda()
    net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True

# Run the accuracy evaluation over n_validate_batch batches.
validate(net, val_loader, None, None, n_validate_batch, use_cuda)
romanvm/django-tinymce4-lite | test_tinymce/views.py | Python | mit | 363 | 0 | from django.views.generic import CreateView, DetailView
from .models import TestModel
class TestCreateView(CreateView):
    """Form view for creating a ``TestModel`` with a single content field."""

    template_name = 'test_tinymce/create.html'
    fields = ('content',)
    model = TestModel
clas | s TestDisplayView(DetailView):
template_name = 'test_tin | ymce/display.html'
context_object_name = 'test_model'
model = TestModel
|
cjparsons74/Kupfer-cjparsons74 | kupfer/plugin/virtualbox/constants.py | Python | gpl-3.0 | 417 | 0.002404 | # -*- coding: UTF-8 -*-
'''
virtualbox_co | nst_support.py
Constants for VirtualBox.
'''
__author__ = "Karol Będkowski <karol.bedkowski@gmail.com>"
__version__ = '0.3'
# virtual machine states
VM_STATE_POWEROFF = 0
VM_STATE_POWERON = 1
VM_STATE_PAUSED = 2
# virtual machine actions
VM_START_NORMAL = 1
VM_START_HEADLESS = 2
VM_PAUSE = 3
VM_POWEROFF = 4
VM_ACPI_POWEROFF = 5
VM_REBOOT = 6
VM_RESUME = 7
VM | _SAVE = 8
|
lmzintgraf/MultiMAuS | authenticators/abstract_authenticator.py | Python | mit | 596 | 0.003356 | from abc import ABCMeta, abstractmethod
class AbstractAuthenticator(metaclass=ABCMeta):
    """Abstract base class for transaction authenticators.

    Subclasses must implement :meth:`authorise_transaction`.
    """

    def __init__(self):
        """
        Initialise the base authenticator state.

        (The previous docstring claimed every authenticator "has to have a
        name" with a ``:param name:`` entry, but this initialiser takes no
        such argument.)
        """
        super().__init__()

    @abstractmethod
    def authorise_transaction(self, customer):
        """
        Decide whether to authorise transaction.
        Note that all relevant information can be obtained from the customer.
        :param customer: the customer making a transaction
        :return: boolean, whether or not to authorise the transaction
        """
EBIvariation/eva-cttv-pipeline | tests/eva_cttv_pipeline/trait_mapping/test_main.py | Python | apache-2.0 | 2,363 | 0.003386 | #!/usr/bin/env python3
"""Tests for the trait mapping pipeline. Test resources are compressed XML files which contain one or a few records
manually extracted from the main ClinVar XML to check specific cases."""
import os
import tempfile
from eva_cttv_pipeline.trait_mapping.main import main
def get_test_resource(resource_name):
    """Return the absolute path of a test resource stored in the ``resources``
    directory next to this module."""
    module_directory = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_directory, 'resources', resource_name)
def run_pipeline(resource_name):
    """Runs the pipeline on a given test resource and returns the output traits, automated mappings, and curation terms
    as lists of lists."""
    input_filename = get_test_resource(resource_name)
    # delete=False so the pipeline can reopen the files by name; they are
    # removed explicitly at the end of this function.
    traits_file, mappings_file, curation_file = [tempfile.NamedTemporaryFile(delete=False) for _ in range(3)]
    filters = {
        'ontologies': 'efo,ordo,hp,mondo',
        'required': 'cttv,eva-clinvar,clinvar-xrefs,gwas',
        'preferred': 'eva-clinvar,cttv,gwas,clinvar-xrefs',
    }
    main(
        input_filepath=input_filename,
        output_traits_filepath=traits_file.name,
        output_mappings_filepath=mappings_file.name,
        output_curation_filepath=curation_file.name,
        filters=filters,
        zooma_host='https://www.ebi.ac.uk',
        oxo_target_list=['Orphanet', 'efo', 'hp', 'mondo'],
        oxo_distance=3
    )
    # Traits are CSV, mappings are TSV; [1:] skips each file's header row.
    output_traits = [line.rstrip().split(',') for line in open(traits_file.name).read().splitlines()][1:]
    output_mappings = [line.rstrip().split('\t') for line in open(mappings_file.name).read().splitlines()][1:]
    # The curation file has no header line.
    output_curation = [line.rstrip().split('\t') for line in open(curation_file.name).read().splitlines()]
    for temp_file in (traits_file, mappings_file, curation_file):
        os.remove(temp_file.name)
    return output_traits, output_mappings, output_curation
def test_main():
    """Basic sanity test of output files, using a random sample of records."""
    output_traits, output_mappings, output_curation = run_pipeline('sample.xml.gz')
    # Every trait ends up either automatically mapped or queued for curation.
    assert len(output_mappings) + len(output_curation) == len(output_traits)
|
chfoo/warcat | warcat/__init__.py | Python | gpl-3.0 | 130 | 0 | '''WARCAT: Web ARChive (WARC) Arch | iving Tool
Tool | and library for handling Web ARChive (WARC) files.
'''
from .version import *
|
plieningerweb/cumulocity-python-device-client | test/test_app.py | Python | mit | 698 | 0.005731 | import unittest
import app
try:
| from unittest import mock
except ImportError:
import mock
from Cumulocity import Cumulocity
class TestApp(unittest.TestCase):
    """Unit tests for the app module's measurement reporting."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @mock.patch('Cumulocity.Cumulocity', spec=Cumulocity)
    def testMeasurement(self, mock_cumulocity):
        """app.measure() must call addMeasurement exactly twice."""
        Cumulocity('test-id')
        c_instance = mock_cumulocity.return_value
        app.measure(c_instance)
        # FIX: removed the unused local 'expected_data = []'.
        # check if we got exactly two calls
        expected_calls = [
            mock.ANY,
            mock.ANY]
        self.assertEqual(c_instance.addMeasurement.mock_calls, expected_calls)
|
bqbn/addons-server | src/olympia/translations/utils.py | Python | bsd-3-clause | 3,578 | 0.000559 | from django.template import engines
from django.utils.encoding import force_str
import html5lib
import jinja2
def truncate_text(text, limit, killwords=False, end='...'):
    """Return as many characters as possible without going over the limit.

    Return the truncated text and the characters left before the limit, if any.

    :param text: the string to truncate (stripped first).
    :param limit: maximum number of characters to keep.
    :param killwords: if True, cut mid-word instead of at a word boundary.
    :param end: suffix appended whenever truncation actually happens.
    """
    text = text.strip()
    text_length = len(text)

    # Short enough already: return unchanged plus the remaining budget.
    if text_length < limit:
        return text, limit - text_length

    # Explicitly add "end" in any case, as Jinja can't know we're truncating
    # for real here, even though we might be at the end of a word.
    text = jinja2.filters.do_truncate(
        engines['jinja2'].env, text, length=limit, killwords=killwords, leeway=0, end=''
    )
    # 0 = no budget left for subsequent text.
    return text + end, 0
def trim(tree, limit, killwords, end):
    """Truncate the text of an html5lib tree.

    Mutates ``tree`` in place and returns ``(tree, remaining_limit)`` so the
    character budget can be threaded through the recursion.
    """
    if tree.text:  # Root node's text.
        tree.text, limit = truncate_text(tree.text, limit, killwords, end)
    for child in tree:  # Immediate children.
        if limit <= 0:
            # We reached the limit, remove all remaining children.
            tree.remove(child)
        else:
            # Recurse on the current child.
            _parsed_tree, limit = trim(child, limit, killwords, end)
    if tree.tail:  # Root node's tail text.
        if limit <= 0:
            tree.tail = ''
        else:
            tree.tail, limit = truncate_text(tree.tail, limit, killwords, end)
    return tree, limit
def text_length(tree):
    """Find the length of the text content, excluding markup.

    Whitespace-only text is ignored (each fragment is stripped first).
    """
    total = 0
    # FIX: Element.getiterator() was removed in Python 3.9; iter() is the
    # supported equivalent (available since Python 2.7).
    for node in tree.iter():  # Traverse all the tree nodes.
        # In etree, a node has a text and tail attribute.
        # Eg: "<b>inner text</b> tail text <em>inner text</em>".
        if node.text:
            total += len(node.text.strip())
        if node.tail:
            total += len(node.tail.strip())
    return total
def truncate(html, length, killwords=False, end='...'):
    """
    Return a slice of ``html`` <= length chars.

    killwords and end are currently ignored.

    ONLY USE FOR KNOWN-SAFE HTML.
    """
    tree = html5lib.parseFragment(html)
    # No truncation needed: return the input unchanged (marked safe).
    if text_length(tree) <= length:
        return jinja2.Markup(html)
    else:
        # Get a truncated version of the tree.
        short, _ = trim(tree, length, killwords, end)

        # Serialize the parsed tree back to html.
        walker = html5lib.treewalkers.getTreeWalker('etree')
        stream = walker(short)
        serializer = html5lib.serializer.HTMLSerializer(
            quote_attr_values='always', omit_optional_tags=False
        )
        return jinja2.Markup(force_str(serializer.render(stream)))
def transfield_changed(field, initial, data):
    """
    For forms, compares initial data against cleaned_data for TransFields.
    Returns True if the translations changed, False if they are the same.

    Arguments:
    field -- name of the form field as-is.
    initial -- data in the form of {'description_en-us': 'x',
                                    'description_en-br': 'y'}
    data -- cleaned data in the form of {'description': {'init': '',
                                                         'en-us': 'x',
                                                         'en-br': 'y'}
    """
    marker = '%s_' % field
    # Normalise both sides to {(locale-qualified key, string value), ...}.
    initial_pairs = {
        (key, value.localized_string)
        for key, value in initial.items()
        if marker in key and value is not None
    }
    cleaned_pairs = {
        ('%s_%s' % (field, locale), value)
        for locale, value in data[field].items()
        if locale != 'init'
    }
    return initial_pairs != cleaned_pairs
|
ptitjes/quodlibet | quodlibet/library/libraries.py | Python | gpl-2.0 | 28,163 | 0 | # Copyright 2006 Joe Wreschnig
# 2013 Nick Boultbee
# 2013,2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Base library classes.
These classes are the most basic library classes. As such they are the
least useful but most content-agnostic.
"""
import os
import shutil
import time
from gi.repository import GObject
from senf import fsn2text, fsnative
from quodlibet import _
from quodlibet.formats import MusicFile, AudioFileError, load_audio_files, \
dump_audio_files, SerializationError
from quodlibet.query import Query
from quodlibet.qltk.notif import Task
from quodlibet.util.atomic import atomic_save
from quodlibet.util.collection import Album
from quodlibet.util.collections import DictMixin
from quodlibet import util
from quodlibet import formats
from quodlibet.util.dprint import print_d, print_w
from quodlibet.util.path import unexpand, mkdir, normalize_path, ishidden, \
ismount
class Library(GObject.GObject, DictMixin):
    """A Library contains useful objects.

    The only required method these objects support is a .key
    attribute, but specific types of libraries may require more
    advanced interfaces.

    Every method which takes a sequence of items expects items to
    implement __iter__, __len__ and __contains__.

    Likewise the signals emit sequences which implement
    __iter__, __len__ and __contains__ e.g. set(), list() or tuple().

    WARNING: The library implements the dict interface with the exception
    that iterating over it yields values and not keys.
    """

    # GObject signals; each carries the set of affected items.
    __gsignals__ = {
        'changed': (GObject.SignalFlags.RUN_LAST, None, (object,)),
        'removed': (GObject.SignalFlags.RUN_LAST, None, (object,)),
        'added': (GObject.SignalFlags.RUN_LAST, None, (object,)),
    }

    # Class-level defaults: an optional coordinating librarian, and a flag
    # set whenever contents change (presumably consulted before saving).
    librarian = None
    dirty = False

    def __init__(self, name=None):
        super(Library, self).__init__()
        self._contents = {}
        self._name = name
        if self.librarian is not None and name is not None:
            self.librarian.register(self, name)

    def destroy(self):
        """Unregister this library from the librarian, if registered."""
        if self.librarian is not None and self._name is not None:
            self.librarian._unregister(self, self._name)

    def changed(self, items):
        """Alert other users that these items have changed.

        This causes a 'changed' signal. If a librarian is available
        this function will call its changed method instead, and all
        libraries that librarian manages may fire a 'changed' signal.

        The item list may be filtered to those items actually in the
        library. If a librarian is available, it will handle the
        filtering instead. That means if this method is delegated to
        the librarian, this library's changed signal may not fire, but
        another's might.
        """

        if not items:
            return

        if self.librarian and self in self.librarian.libraries.values():
            print_d("Changing %d items via librarian." % len(items), self)
            self.librarian.changed(items)
        else:
            # Only report items this library actually contains.
            items = {item for item in items if item in self}
            if not items:
                return
            print_d("Changing %d items directly." % len(items), self)
            self._changed(items)

    def _changed(self, items):
        """Internal: mark dirty and emit 'changed' for a pre-filtered set."""
        assert isinstance(items, set)

        # Called by the changed method and Librarians.
        if not items:
            return
        print_d("Changing %d items." % len(items), self)
        self.dirty = True
        self.emit('changed', items)

    def __iter__(self):
        """Iterate over the items in the library."""
        return iter(self._contents.values())

    def iteritems(self):
        """Iterate over (key, item) pairs."""
        return iter(self._contents.items())

    def iterkeys(self):
        """Iterate over item keys."""
        return iter(self._contents.keys())

    def itervalues(self):
        """Iterate over items."""
        return iter(self._contents.values())

    def __len__(self):
        """The number of items in the library."""
        return len(self._contents)

    def __getitem__(self, key):
        """Find a item given its key."""
        return self._contents[key]

    def __contains__(self, item):
        """Check if a key or item is in the library."""
        try:
            # Accept either a raw key or an object exposing .key.
            return item in self._contents or item.key in self._contents
        except AttributeError:
            # Unhashable / keyless objects are simply not contained.
            return False

    def get_content(self):
        """All items including hidden ones for saving the library
        (see FileLibrary with masked items)
        """

        return list(self.values())

    def keys(self):
        return self._contents.keys()

    def values(self):
        return self._contents.values()

    def _load_item(self, item):
        """Load (add) an item into this library"""
        # Subclasses should override this if they want to check
        # item validity; see `FileLibrary`.
        print_d("Loading %r." % item.key, self)
        self.dirty = True
        self._contents[item.key] = item

    def _load_init(self, items):
        """Load many items into the library (on start)"""
        # Subclasses should override this if they want to check
        # item validity; see `FileLibrary`.
        content = self._contents
        for item in items:
            content[item.key] = item

    def add(self, items):
        """Add items. This causes an 'added' signal.

        Return the sequence of items actually added, filtering out items
        already in the library.
        """

        items = {item for item in items if item not in self}
        if not items:
            return items

        print_d("Adding %d items." % len(items), self)
        for item in items:
            self._contents[item.key] = item

        self.dirty = True
        self.emit('added', items)
        return items

    def remove(self, items):
        """Remove items. This causes a 'removed' signal.

        Return the sequence of items actually removed.
        """

        items = {item for item in items if item in self}
        if not items:
            return items

        print_d("Removing %d items." % len(items), self)
        for item in items:
            del(self._contents[item.key])

        self.dirty = True
        self.emit('removed', items)
        return items
def _load_items(filename):
    """Load items from disk.

    In case of an error returns default or an empty list.
    """

    try:
        with open(filename, "rb") as fp:
            data = fp.read()
    except EnvironmentError:
        # Missing or unreadable file: start with an empty library.
        print_w("Couldn't load library file from: %r" % filename)
        return []

    try:
        items = load_audio_files(data)
    except SerializationError:
        # there are too many ways this could fail
        util.print_exc()
        # move the broken file out of the way
        try:
            shutil.copy(filename, filename + ".not-valid")
        except EnvironmentError:
            util.print_exc()
        return []

    return items
class PicklingMixin(object):
"""A mixin to provide persistence of a library by pickling to disk"""
filename = None
def load(self, filename):
"""Load a library from a file, containing a picked list.
Loading does not cause added, changed, or removed signals.
"""
self.filename = filename
print_d("Loading contents of %r." % filename, self)
items = _load_items(filename)
# this loads all items without checking their validity, but makes
# sure that non-mounted items are masked
self._load_init(items)
print_d("Done loading contents of %r." % filename, self)
def save(self, filename=None):
"""Save the library to the given filename, or the default if `None`"""
if filename is None:
filename = self.filename
print_d("Saving contents to %r." % filename, self)
try:
dirname = os.path.dirname(filename)
mkdir(dirname)
with atomic_save(filename, |
sestrella/ansible | test/units/modules/network/ios/test_ios_ping.py | Python | gpl-3.0 | 2,838 | 0.001409 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Repaired: the set_module_args import line was corrupted by a stray " | "
# splitting "units.modules" in two, which is a SyntaxError as written.
from units.compat.mock import patch
from ansible.modules.network.ios import ios_ping
from units.modules.utils import set_module_args
from .ios_module import TestIosModule, load_fixture
class TestIosPingModule(TestIosModule):
    """Unit tests for the ios_ping module."""

    module = ios_ping

    def setUp(self):
        """Patch run_commands so no real device connection is attempted."""
        super(TestIosPingModule, self).setUp()
        self.mock_run_commands = patch('ansible.modules.network.ios.ios_ping.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        """Undo the run_commands patch."""
        super(TestIosPingModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        """Route mocked run_commands calls to canned fixture files."""
        def load_from_file(*args, **kwargs):
            module = args
            commands = kwargs['commands']
            output = list()
            for command in commands:
                # Map e.g. "ping 8.8.8.8 ... | ..." to a fixture name by
                # taking everything before the first pipe and replacing
                # spaces with underscores (prefixed with "ios_ping_").
                filename = str(command).split(' | ')[0].replace(' ', '_')
                output.append(load_fixture('ios_ping_%s' % filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_ios_ping_expected_success(self):
        ''' Test for successful pings when destination should be reachable '''
        set_module_args(dict(count=2, dest="8.8.8.8"))
        self.execute_module()

    def test_ios_ping_expected_failure(self):
        ''' Test for unsuccessful pings when destination should not be reachable '''
        set_module_args(dict(count=2, dest="10.255.255.250", state="absent"))
        self.execute_module()

    def test_ios_ping_unexpected_success(self):
        ''' Test for successful pings when destination should not be reachable - FAIL. '''
        set_module_args(dict(count=2, dest="8.8.8.8", state="absent"))
        self.execute_module(failed=True)

    def test_ios_ping_unexpected_failure(self):
        ''' Test for unsuccessful pings when destination should be reachable - FAIL. '''
        set_module_args(dict(count=2, dest="10.255.255.250"))
        self.execute_module(failed=True)
|
zgreatone/python-veralite | veralite/__init__.py | Python | lgpl-3.0 | 652 | 0 | # -*- coding:utf-8 -*-
# Repaired: stray " | " extraction artifacts corrupted the DimmingLight
# import line and the __all__ list, both of which are SyntaxErrors as written.
from .veralite import Veralite
from .scene import Scene
from .device import Device
from .device import DimmingLight
from .device import Switch
from .device import MotionSensor
from .exceptions import VeraliteException
from .exceptions import VeraliteConnectionError
from .exceptions import InvalidDeviceError
from .exceptions import InvalidSceneError

# Public API of the veralite package.
__all__ = ['Veralite',
           'Scene',
           'Device',
           'DimmingLight',
           'Switch',
           'MotionSensor',
           'VeraliteException',
           'VeraliteConnectionError',
           'InvalidDeviceError',
           'InvalidSceneError']
|
fraricci/pymatgen | pymatgen/analysis/diffraction/core.py | Python | mit | 7,522 | 0.000266 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import collections
import abc
import numpy as np
from pymatgen.core.spectrum import Spectrum
from pymatgen.util.plotting import add_fig_kwargs
"""
This module implements core classes for calculation of diffraction patterns.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "5/22/14"
class DiffractionPattern(Spectrum):
    """
    A representation of a diffraction pattern
    """

    XLABEL = "$2\\Theta$"
    YLABEL = "Intensity"

    def __init__(self, x, y, hkls, d_hkls):
        """
        Args:
            x: Two theta angles.
            y: Intensities.
            hkls: one entry per peak, each a list of
                {"hkl": (h, k, l), "multiplicity": mult} dicts naming the
                diffracted lattice planes that contribute to that peak's
                intensity.
            d_hkls: List of interplanar spacings.
        """
        super().__init__(x, y, hkls, d_hkls)
        # Keep the reflection metadata directly accessible for plotting.
        self.hkls, self.d_hkls = hkls, d_hkls
class AbstractDiffractionPatternCalculator(abc.ABC):
    """
    Abstract base class for computing the diffraction pattern of a crystal.
    """

    # Tolerance in which to treat two peaks as having the same two theta.
    TWO_THETA_TOL = 1e-5

    # Tolerance in which to treat a peak as effectively 0 if the scaled
    # intensity is less than this number. Since the max intensity is 100,
    # this means the peak must be less than 1e-5 of the peak intensity to be
    # considered as zero. This deals with numerical issues where systematic
    # absences do not cancel exactly to zero.
    SCALED_INTENSITY_TOL = 1e-3

    @abc.abstractmethod
    def get_pattern(self, structure, scaled=True, two_theta_range=(0, 90)):
        """
        Calculates the diffraction pattern for a structure.

        Args:
            structure (Structure): Input structure
            scaled (bool): Whether to return scaled intensities. The maximum
                peak is set to a value of 100. Defaults to True. Use False if
                you need the absolute values to combine XRD plots.
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set to
                None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.

        Returns:
            (DiffractionPattern)
        """
        pass

    def get_plot(self, structure, two_theta_range=(0, 90),
                 annotate_peaks=True, ax=None, with_labels=True,
                 fontsize=16):
        """
        Returns the diffraction plot as a matplotlib.pyplot.

        Args:
            structure: Input structure
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set to
                None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.
            annotate_peaks: Whether to annotate the peaks with plane
                information.
            ax: matplotlib :class:`Axes` or None if a new figure should be created.
            with_labels: True to add xlabels and ylabels to the plot.
            fontsize: (int) fontsize for peak labels.

        Returns:
            (matplotlib.pyplot)
        """
        if ax is None:
            from pymatgen.util.plotting import pretty_plot
            plt = pretty_plot(16, 10)
            ax = plt.gca()
        else:
            # This to maintain the type of the return value.
            import matplotlib.pyplot as plt

        xrd = self.get_pattern(structure, two_theta_range=two_theta_range)

        for two_theta, i, hkls, d_hkl in zip(xrd.x, xrd.y, xrd.hkls, xrd.d_hkls):
            # two_theta_range may legitimately be None ("all diffracted
            # beams", per the docstring); plot every peak in that case
            # instead of crashing on None[0].
            if two_theta_range is None or \
                    two_theta_range[0] <= two_theta <= two_theta_range[1]:
                label = ", ".join([str(hkl["hkl"]) for hkl in hkls])
                ax.plot([two_theta, two_theta], [0, i], color='k',
                        linewidth=3, label=label)
                if annotate_peaks:
                    ax.annotate(label, xy=[two_theta, i],
                                xytext=[two_theta, i], fontsize=fontsize)

        if with_labels:
            ax.set_xlabel(r"$2\theta$ ($^\circ$)")
            ax.set_ylabel("Intensities (scaled)")

        # Axes objects normally have no tight_layout; this only fires when
        # a pyplot-like object was passed in as ax.
        if hasattr(ax, "tight_layout"):
            ax.tight_layout()

        return plt

    def show_plot(self, structure, **kwargs):
        """
        Shows the diffraction plot.

        Args:
            structure (Structure): Input structure
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set to
                None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.
            annotate_peaks (bool): Whether to annotate the peaks with plane
                information.
        """
        self.get_plot(structure, **kwargs).show()

    @add_fig_kwargs
    def plot_structures(self, structures, fontsize=6, **kwargs):
        """
        Plot diffraction patterns for multiple structures on the same figure.

        Args:
            structures (Structure): List of structures
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set to
                None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.
            annotate_peaks (bool): Whether to annotate the peaks with plane
                information.
            fontsize: (int) fontsize for peak labels.
        """
        import matplotlib.pyplot as plt

        nrows = len(structures)
        fig, axes = plt.subplots(nrows=nrows, ncols=1, sharex=True,
                                 squeeze=False)

        for i, (ax, structure) in enumerate(zip(axes.ravel(), structures)):
            # Only the bottom-most subplot gets axis labels.
            self.get_plot(structure,
                          fontsize=fontsize, ax=ax, with_labels=i == nrows - 1,
                          **kwargs)
            spg_symbol, spg_number = structure.get_space_group_info()
            ax.set_title("{} {} ({}) ".format(structure.formula, spg_symbol,
                                              spg_number))

        return fig
def get_unique_families(hkls):
    """
    Returns unique families of Miller indices. Families must be permutations
    of each other.

    Args:
        hkls ([h, k, l]): List of Miller indices.

    Returns:
        {hkl: multiplicity}: A dict with unique hkl and multiplicity.
    """
    # Two hkls belong to the same family iff their sorted absolute indices
    # match, so group once by that canonical key. This replaces the previous
    # O(n^2) pairwise permutation test with a single O(n log n) pass.
    families = collections.defaultdict(list)
    for hkl in hkls:
        families[tuple(sorted(abs(i) for i in hkl))].append(hkl)

    # Represent each family by its lexicographically largest member — the
    # same representative the original sorted(v)[-1] choice produced.
    return {sorted(members)[-1]: len(members) for members in families.values()}
|
microhh/microhh | kernel_tuner/statistics.py | Python | gpl-3.0 | 1,185 | 0.008439 | import matplotlib.pyplot as pl
import numpy as np
import json
import glob
pl.close('all')  # drop any figures left over from a previous interactive run
pl.ion()  # interactive mode: figures display without blocking the script
def get_timings(kernel_name, gridsizes):
    """Return the fastest measured time for each grid size.

    Reads '<kernel_name>_<size:03d>.json' for every entry of *gridsizes*
    and picks the minimum reported 'time' from the first result list.
    """
    best = np.zeros_like(gridsizes, dtype=float)
    for idx, size in enumerate(gridsizes):
        path = '{0}_{1:03d}.json'.format(kernel_name, size)
        with open(path) as fh:
            records = json.load(fh)
        # 1e9 mirrors the original sentinel used when no timings exist.
        best[idx] = min((entry['time'] for entry in records[0]), default=1e9)
    return best
if __name__ == '__main__':
    # Grid sizes used by the kernel-tuner runs: 32, 64, ..., 512.
    gridsize = np.arange(32, 513, 32)

    # Fastest time per grid size, without and with shared-memory kernels.
    # (Repaired: "normal" was corrupted by a stray " | " in the source.)
    normal = get_timings('diff_c_g', gridsize)
    smem = get_timings('diff_c_g_smem', gridsize)

    # Number of grid points, used to normalise timings per point.
    fac = gridsize**3

    pl.figure(figsize=(8,4))

    pl.subplot(121)
    pl.plot(gridsize, normal/fac, 'k-x', label='non smem')
    pl.plot(gridsize, smem /fac, 'r-x', label='smem')
    pl.ylim(0, 2e-7)
    pl.ylabel('time/gridpoint (s)')
    pl.xlabel('gridpoints (-)')
    pl.legend()
    pl.grid()

    pl.subplot(122)
    # Speed-up of the shared-memory variant relative to the plain kernel.
    pl.plot(gridsize, normal/smem, 'k-x')
    pl.ylabel('non_smem/smem (-)')
    pl.xlabel('gridpoints (-)')
    pl.grid()

    pl.tight_layout()
sparkslabs/kamaelia_ | Sketches/AM/KPIPackage/Kamaelia/Community/AM/Kamaelia/KPIFramework/KPI/Server/DataTx.py | Python | apache-2.0 | 2,606 | 0.004221 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
"""
====================
DataTx
====================
The DataTx packetises the data. It adds a packet header to the outgoing data.
How it works?
---------------------
The DataTx adds a header to the data received on its inboxes "keyin" and
"inbox". The packet header contains packet type and packet length.
It is necessary to distinguish between encrypted data to be sent and
encrypted session keys because the client needs to be able to
distinguish between the two.
"""
import Axon
import struct
class DataTx(Axon.Component.component):
    """\ DataTx() -> new DataTx component

    Prefixes encrypted payloads with a packet header so the receiver can
    tell encrypted session-key packets apart from encrypted data packets.

    Keyword arguments: None
    """
    Inboxes = {"inbox" : "encrypted data",
               "keyIn" : "encrypted session key",
               "control" : "receive shutdown messages"}
    Outboxes = {"outbox" : "add header and send encrypted key and data packets",
                "signal" : "pass shutdown messages"}

    def __init__(self):
        super(DataTx,self).__init__()

    def main(self):
        # Packet-type tags written into the header; the client uses these
        # to distinguish session keys (KEY) from payload data (DATA).
        KEY = 0x20
        DATA = 0x30
        while 1:
            # Header layout: two unsigned 32-bit big-endian fields —
            # packet type, then payload length ("!2L").
            while self.dataReady("keyIn"):
                data = self.recv("keyIn")
                header = struct.pack("!2L", KEY, len(data))
                packet = header + data
                self.send(packet, "outbox")
                yield 1
            if self.dataReady("inbox"):
                data = self.recv("inbox")
                header = struct.pack("!2L", DATA, len(data))
                packet = header + data
                self.send(packet, "outbox")
            # NOTE(review): source indentation was ambiguous here; this
            # yield is placed at loop level so the generator always hands
            # control back to the scheduler each pass — confirm against
            # the original Kamaelia file.
            yield 1
|
atvcaptain/enigma2 | lib/python/Screens/PowerLost.py | Python | gpl-2.0 | 1,103 | 0.023572 | from __future__ import absolute_import
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.config import config
import Screens.Standby
from boxbranding import getMachineBrand, getMachineName
class PowerLost(Screen):
    """Screen shown after an unclean shutdown; optionally drops the box
    into standby/deep standby depending on configuration.

    Repaired: stray " | " artifacts had split the tokens "ret" and "value"
    in MsgBoxClosed, which is a SyntaxError as written.
    """

    def __init__(self, session):
        Screen.__init__(self, session)
        self.showMessageBox()

    def showMessageBox(self):
        """Ask the user (with timeout) whether to enter standby; in
        non-'normal' boot modes, proceed as if the user agreed."""
        if config.usage.boot_action.value == 'normal':
            message = _("Your %s %s was not shutdown properly.\n\n"
                "Do you want to put it in %s?") % (getMachineBrand(), getMachineName(), config.usage.shutdownNOK_action.value)
            self.session.openWithCallback(self.MsgBoxClosed, MessageBox, message, MessageBox.TYPE_YESNO, timeout = int(config.usage.shutdown_msgbox_timeout.value), default = True)
        else:
            self.MsgBoxClosed(True)

    def MsgBoxClosed(self, ret):
        """MessageBox callback: ret is True when the user confirmed."""
        if ret:
            if config.usage.shutdownNOK_action.value == 'deepstandby' and not config.usage.shutdownOK.value:
                self.session.open(Screens.Standby.TryQuitMainloop, 1)
            elif not Screens.Standby.inStandby:
                self.session.open(Screens.Standby.Standby)
        self.close()
|
reclosedev/pyautocad | examples/lights.py | Python | bsd-2-clause | 1,272 | 0.005516 | #/usr/bin/env python
#-*- coding: utf-8 -*-
import re
import sys
from collections import namedtuple, defaultdict
from pyautocad import Autocad
from pyautocad import utils
LampEntry = namedtuple('LampEntry', 'number, mark, numxpower')
# \A1;2ARCTIC SMC/SAN 254 \S2х54/2,5;\P300 лк
def iter_lamps(acad, objects):
for obj in acad.iter_objects(('MText', 'MLeader'), block=objects):
try:
text = obj.TextString
except Excep | tion:
continue
text = utils.unformat_mtext(text)
m = re.search(ur'(?P<num>\d+)(?P<mark>.*?)\\S(?P<num_power>.*?)/.*?;', text)
if not m:
continue
print m.group('num'), m.group('mark'), m.group('num_power')
yield LampEntry(m.group('num'), m.group('mark'), m.group('num_power'))
def main():
acad = Autocad()
objects = None
if 'i' in sys.argv[1:]:
objects = acad.get_ | selection('Select objects')
lamps = defaultdict(int)
for lamp in iter_lamps(acad, objects):
lamps[lamp.mark] += int(lamp.number)
print '-' * 79
for mark, number in sorted(lamps.iteritems()):
print '%-20s | %s' % (mark, number)
if __name__ == "__main__":
with utils.timing():
main()
|
i3thuan5/gi2_liau7_khoo3 | 語言模型/apps.py | Python | mit | 99 | 0 | from django.apps import AppConfig
|
class 語言模型Config(AppConfig):
    """Django app configuration for the 語言模型 ("language model") app."""
    # Repaired: a stray " | " artifact had been injected inside the string
    # literal, corrupting the app name the Django registry looks up.
    name = '語言模型'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.