| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
tovmeod/anaf
|
anaf/core/middleware/domain.py
|
Python
|
bsd-3-clause
| 1,463
| 0.000684
|
"""
Domain middleware: enables multi-tenancy in a single process
"""
from anaf.core.domains import setup_domain, setup_domain_database
from anaf.core.db import DatabaseNotFound
from anaf.core.conf import settings
from django.http import HttpResponseRedirect
from django.db.utils import DatabaseError
from django.core.urlresolvers import reverse
from pandora import box
class DomainMiddleware(object):
"""Handles multiple domains within the same Django process"""
def process_request(self, request):
"""Identify the current domain and database, set up appropriate variables in the pandora box"""
domain = request.get_host().split('.')[0]
try:
setup_domain(domain)
except DatabaseNotFound:
evergreen_url = getattr(
settings, 'EVERGREEN_BASE_URL', 'http://tree.io/')
return HttpResponseRedirect(evergreen_url)
except DatabaseError:
from django.db import router
from anaf.core.models import ConfigSetting
setup_domain_database(router.db_for_read(ConfigSetting))
return HttpResponseRedirect(reverse('database_setup'))
box['request'] = request
def process_exception(self, request, exception):
        if isinstance(exception, DatabaseNotFound):
            evergreen_url = getattr(
                settings, 'EVERGREEN_BASE_URL', 'http://tree.io/')
            return HttpResponseRedirect(evergreen_url)
|
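For context on the sample above: pre-1.10 Django middleware classes such as `DomainMiddleware` are activated by listing their dotted path in the settings module. A minimal sketch, assuming the module path shown in this row's `path` column; the other entries are purely illustrative:

```python
# settings.py (hypothetical excerpt) -- old-style Django middleware is enabled
# by dotted path; its process_request/process_exception hooks then run per request.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'anaf.core.middleware.domain.DomainMiddleware',  # multi-tenancy by subdomain
)
```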
alephobjects/Cura2
|
plugins/LegacyProfileReader/LegacyProfileReader.py
|
Python
|
lgpl-3.0
| 10,008
| 0.006695
|
# Copyright (c) 2017 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import configparser # For reading the legacy profile INI files.
import io
import json # For reading the Dictionary of Doom.
import math # For mathematical operations included in the Dictionary of Doom.
import os.path # For concatenating the path to the plugin and the relative path to the Dictionary of Doom.
from UM.Application import Application # To get the machine manager to create the new profile in.
from UM.Logger import Logger # Logging errors.
from UM.PluginRegistry import PluginRegistry # For getting the path to this plugin's directory.
from UM.Settings.ContainerRegistry import ContainerRegistry #To create unique profile IDs.
from UM.Settings.InstanceContainer import InstanceContainer # The new profile to make.
from cura.ProfileReader import ProfileReader # The plug-in type to implement.
from cura.Settings.ExtruderManager import ExtruderManager #To get the current extruder definition.
## A plugin that reads profile data from legacy Cura versions.
#
# It reads a profile from an .ini file, and performs some translations on it.
# Not all translations are correct, mind you, but it is a best effort.
class LegacyProfileReader(ProfileReader):
## Initialises the legacy profile reader.
#
# This does nothing since the only other function is basically stateless.
def __init__(self):
super().__init__()
## Prepares the default values of all legacy settings.
#
# These are loaded from the Dictionary of Doom.
#
# \param json The JSON file to load the default setting values from. This
# should not be a URL but a pre-loaded JSON handle.
# \return A dictionary of the default values of the legacy Cura version.
def prepareDefaults(self, json):
defaults = {}
for key in json["defaults"]: # We have to copy over all defaults from the JSON handle to a normal dict.
defaults[key] = json["defaults"][key]
return defaults
## Prepares the local variables that can be used in evaluation of computing
    #   new setting values from the old ones.
#
# This fills a dictionary with all settings from the legacy Cura version
# and their values, so that they can be used in evaluating the new setting
# values as Python code.
#
    #   \param config_parser The ConfigParser that finds the settings in the
# legacy profile.
# \param config_section The section in the profile where the settings
# should be found.
# \param defaults The default values for all settings in the legacy Cura.
# \return A set of local variables, one for each setting in the legacy
# profile.
def prepareLocals(self, config_parser, config_section, defaults):
copied_locals = defaults.copy() # Don't edit the original!
for option in config_parser.options(config_section):
copied_locals[option] = config_parser.get(config_section, option)
return copied_locals
## Reads a legacy Cura profile from a file and returns it.
#
# \param file_name The file to read the legacy Cura profile from.
# \return The legacy Cura profile that was in the file, if any. If the
# file could not be read or didn't contain a valid profile, \code None
# \endcode is returned.
def read(self, file_name):
if file_name.split(".")[-1] != "ini":
return None
global_container_stack = Application.getInstance().getGlobalContainerStack()
if not global_container_stack:
return None
multi_extrusion = global_container_stack.getProperty("machine_extruder_count", "value") > 1
if multi_extrusion:
Logger.log("e", "Unable to import legacy profile %s. Multi extrusion is not supported", file_name)
raise Exception("Unable to import legacy profile. Multi extrusion is not supported")
Logger.log("i", "Importing legacy profile from file " + file_name + ".")
container_registry = ContainerRegistry.getInstance()
profile_id = container_registry.uniqueName("Imported Legacy Profile")
profile = InstanceContainer(profile_id) # Create an empty profile.
parser = configparser.ConfigParser(interpolation = None)
try:
parser.read([file_name]) # Parse the INI file.
except Exception as e:
Logger.log("e", "Unable to open legacy profile %s: %s", file_name, str(e))
return None
# Legacy Cura saved the profile under the section "profile_N" where N is the ID of a machine, except when you export in which case it saves it in the section "profile".
# Since importing multiple machine profiles is out of scope, just import the first section we find.
section = ""
for found_section in parser.sections():
if found_section.startswith("profile"):
section = found_section
break
if not section: # No section starting with "profile" was found. Probably not a proper INI file.
return None
try:
with open(os.path.join(PluginRegistry.getInstance().getPluginPath("LegacyProfileReader"), "DictionaryOfDoom.json"), "r", -1, "utf-8") as f:
dict_of_doom = json.load(f) # Parse the Dictionary of Doom.
except IOError as e:
Logger.log("e", "Could not open DictionaryOfDoom.json for reading: %s", str(e))
return None
except Exception as e:
Logger.log("e", "Could not parse DictionaryOfDoom.json: %s", str(e))
return None
defaults = self.prepareDefaults(dict_of_doom)
legacy_settings = self.prepareLocals(parser, section, defaults) #Gets the settings from the legacy profile.
#Check the target version in the Dictionary of Doom with this application version.
if "target_version" not in dict_of_doom:
Logger.log("e", "Dictionary of Doom has no target version. Is it the correct JSON file?")
return None
if InstanceContainer.Version != dict_of_doom["target_version"]:
Logger.log("e", "Dictionary of Doom of legacy profile reader (version %s) is not in sync with the current instance container version (version %s)!", dict_of_doom["target_version"], str(InstanceContainer.Version))
return None
if "translation" not in dict_of_doom:
Logger.log("e", "Dictionary of Doom has no translation. Is it the correct JSON file?")
return None
current_printer_definition = global_container_stack.definition
profile.setDefinition(current_printer_definition.getId())
for new_setting in dict_of_doom["translation"]: # Evaluate all new settings that would get a value from the translations.
old_setting_expression = dict_of_doom["translation"][new_setting]
compiled = compile(old_setting_expression, new_setting, "eval")
try:
new_value = eval(compiled, {"math": math}, legacy_settings) # Pass the legacy settings as local variables to allow access to in the evaluation.
value_using_defaults = eval(compiled, {"math": math}, defaults) #Evaluate again using only the default values to try to see if they are default.
except Exception: # Probably some setting name that was missing or something else that went wrong in the ini file.
Logger.log("w", "Setting " + new_setting + " could not be set because the evaluation failed. Something is probably missing from the imported legacy profile.")
continue
definitions = current_printer_definition.findDefinitions(key = new_setting)
if definitions:
if new_value != value_using_defaults and definitions[0].default_value != new_value: # Not equal to the default in the new Cura OR the default in the legacy Cura.
profile.setProperty(new_setting, "value", new_value) # Store the setting in the profile!
if len(profile.getAllKeys()) == 0:
Logger.log("i", "A legacy
|
krux/adspygoogle
|
examples/adspygoogle/dfp/v201204/get_user.py
|
Python
|
apache-2.0
| 1,504
| 0.001995
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets a user by its id. To create users, run
create_user.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201204')
# Set the id of the user to get.
user_id = 'INSERT_USER_ID_HERE'
# Get user.
user = user_service.GetUser(user_id)[0]
# Display results.
print ('User with id \'%s\', email \'%s\', and role \'%s\' was found.'
% (user['id'], user['email'], user['roleName']))
|
menzenski/django-year-end-site
|
yearendsite/wsgi.py
|
Python
|
mit
| 399
| 0
|
"""
WSGI config for yearendsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yearendsite.settings")
application = get_wsgi_application()
|
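As an aside, any WSGI callable like the `application` object above can be smoke-tested with the standard library alone; a minimal sketch (the module path is taken from this row, everything else is assumed):

```python
# Hypothetical smoke test: serve the Django WSGI app with stdlib wsgiref.
from wsgiref.simple_server import make_server

from yearendsite.wsgi import application  # the callable defined in the sample

if __name__ == "__main__":
    httpd = make_server("127.0.0.1", 8000, application)
    httpd.serve_forever()  # Ctrl-C to stop; production would use gunicorn/uwsgi
```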
jdemel/gnuradio
|
grc/gui/Dialogs.py
|
Python
|
gpl-3.0
| 15,494
| 0.002259
|
# Copyright 2008, 2009, 2016 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
from __future__ import absolute_import
import sys
import textwrap
from distutils.spawn import find_executable
from gi.repository import Gtk, GLib
from . import Utils, Actions, Constants
from ..core import Messages
class SimpleTextDisplay(Gtk.TextView):
"""
A non user-editable gtk text view.
"""
def __init__(self, text=''):
"""
TextDisplay constructor.
Args:
text: the text to display (string)
"""
Gtk.TextView.__init__(self)
self.set_text = self.get_buffer().set_text
self.set_text(text)
self.set_editable(False)
self.set_cursor_visible(False)
self.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
class TextDisplay(SimpleTextDisplay):
"""
A non user-editable scrollable text view with popup menu.
"""
def __init__(self, text=''):
"""
TextDisplay constructor.
Args:
text: the text to display (string)
"""
SimpleTextDisplay.__init__(self, text)
self.scroll_lock = True
self.connect("populate-popup", self.populate_popup)
def insert(self, line):
"""
Append text after handling backspaces and auto-scroll.
Args:
line: the text to append (string)
"""
line = self._consume_backspaces(line)
self.get_buffer().insert(self.get_buffer().get_end_iter(), line)
self.scroll_to_end()
def _consume_backspaces(self, line):
"""
Removes text from the buffer if line starts with '\b'
Args:
line: a string which may contain backspaces
Returns:
The string that remains from 'line' with leading '\b's removed.
"""
if not line:
return
# for each \b delete one char from the buffer
back_count = 0
start_iter = self.get_buffer().get_end_iter()
while len(line) > back_count and line[back_count] == '\b':
# stop at the beginning of a line
if not start_iter.starts_line():
start_iter.backward_char()
back_count += 1
# remove chars from buffer
self.get_buffer().delete(start_iter, self.get_buffer().get_end_iter())
return line[back_count:]
def scroll_to_end(self):
""" Update view's scroll position. """
if self.scroll_lock:
buf = self.get_buffer()
mark = buf.get_insert()
buf.move_mark(mark, buf.get_end_iter())
self.scroll_mark_onscreen(mark)
def clear(self):
""" Clear all text from buffer. """
buf = self.get_buffer()
buf.delete(buf.get_start_iter(), buf.get_end_iter())
def save(self, file_path):
"""
Save context of buffer to the given file.
Args:
file_path: location to save buffer contents
"""
with open(file_path, 'w') as logfile:
buf = self.get_buffer()
logfile.write(buf.get_text(buf.get_start_iter(),
buf.get_end_iter(), True))
# Action functions are set by the Application's init function
def clear_cb(self, menu_item, web_view):
""" Callback function to clear the text buffer """
Actions.CLEAR_CONSOLE()
def scroll_back_cb(self, menu_item, web_view):
""" Callback function to toggle scroll lock """
Actions.TOGGLE_SCROLL_LOCK()
def save_cb(self, menu_item, web_view):
""" Callback function to save the buffer """
Actions.SAVE_CONSOLE()
def populate_popup(self, view, menu):
"""Create a popup menu for the scroll lock and clear functions"""
menu.append(Gtk.SeparatorMenuItem())
lock = Gtk.CheckMenuItem(label = "Scroll Lock")
menu.append(lock)
lock.set_active(self.scroll_lock)
lock.connect('activate', self.scroll_back_cb, view)
save = Gtk.ImageMenuItem(label = "Save Console")
menu.append(save)
save.connect('activate', self.save_cb, view)
clear = Gtk.ImageMenuItem(label = "Clear Console")
menu.append(clear)
clear.connect('activate', self.clear_cb, view)
menu.show_all()
return False
class MessageDialogWrapper(Gtk.MessageDialog):
""" Run a message dialog. """
def __init__(self, parent, message_type, buttons, title=None, markup=None,
default_response=None, extra_buttons=None):
"""
Create a modal message dialog.
Args:
message_type: the type of message may be one of:
Gtk.MessageType.INFO
Gtk.MessageType.WARNING
Gtk.MessageType.QUESTION or Gtk.MessageType.ERROR
buttons: the predefined set of buttons to use:
Gtk.ButtonsType.NONE
Gtk.ButtonsType.OK
Gtk.ButtonsType.CLOSE
Gtk.ButtonsType.CANCEL
Gtk.ButtonsType.YES_NO
Gtk.ButtonsType.OK_CANCEL
title: the title of the window (string)
markup: the message text with pango markup
default_response: if set, determines which button is highlighted by default
extra_buttons: a tuple containing pairs of values:
each value is the button's text and the button's return value
"""
Gtk.MessageDialog.__init__(
self, transient_for=parent, modal=True, destroy_with_parent=True,
message_type=message_type, buttons=buttons
)
if title:
self.set_title(title)
if markup:
self.set_markup(markup)
if extra_buttons:
self.add_buttons(*extra_buttons)
        if default_response:
self.set_default_response(default_response)
def run_and_destroy(self):
response = self.run()
self.hide()
return response
class ErrorsDialog(Gtk.Dialog):
""" Display flowgraph errors. """
def __init__(self, parent, flowgraph):
"""Create a listview of errors"""
Gtk.Dialog.__init__(
self,
title='Errors and Warnings',
transient_for=parent,
            modal=True,
destroy_with_parent=True,
)
self.add_buttons(Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT)
self.set_size_request(750, Constants.MIN_DIALOG_HEIGHT)
self.set_border_width(10)
self.store = Gtk.ListStore(str, str, str)
self.update(flowgraph)
self.treeview = Gtk.TreeView(model=self.store)
for i, column_title in enumerate(["Block", "Aspect", "Message"]):
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(column_title, renderer, text=i)
column.set_sort_column_id(i) # liststore id matches treeview id
column.set_resizable(True)
self.treeview.append_column(column)
self.scrollable = Gtk.ScrolledWindow()
self.scrollable.set_vexpand(True)
self.scrollable.add(self.treeview)
self.vbox.pack_start(self.scrollable, True, True, 0)
self.show_all()
def update(self, flowgraph):
self.store.clear()
for element, message in flowgraph.iter_error_messages():
if element.is_block:
src, aspect = element.name, ''
elif element.is_connection:
src = element.source_block.name
aspect = "Connection to '{}'".format(element.sink_block.name)
elif element.is_port:
src = element.parent_block.name
aspect = "{} '{}'".format('Sink' if element.is_sink else 'Source', element.name)
elif element.is_param:
src = element.parent_block.name
aspect = "Param '{}'".format(element.name)
else:
src = aspect = ''
self.s
|
dongguangming/python-github3
|
pygithub3/requests/git_data/tags.py
|
Python
|
isc
| 412
| 0
|
# -*- encoding: utf-8 -*-
from pygithub3.requests.base import Request
from pygithub3.resources.git_data import Tag
class Get(Request):
uri = 'repos/{user}/{repo}/git/tags/{sha}'
resource = Tag
class Create(Request):
uri = 'repos/{user}/{repo}/git/tags'
resource = Tag
body_schema = {
        'schema': ('tag', 'message', 'object', 'type', 'tagger'),
'required': ('type',),
}
|
jbrinley/HocrConverter
|
HocrConverter.py
|
Python
|
mit
| 5,986
| 0.019044
|
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.units import inch
from xml.etree.ElementTree import ElementTree
import Image, re, sys
class HocrConverter():
"""
A class for converting documents to/from the hOCR format.
For details of the hOCR format, see:
http://docs.google.com/View?docid=dfxcv4vc_67g844kf
See also:
http://code.google.com/p/hocr-tools/
Basic usage:
Create a PDF from an hOCR file and an image:
hocr = HocrConverter("path/to/hOCR/file")
hocr.to_pdf("path/to/image/file", "path/to/output/file")
"""
def __init__(self, hocrFileName = None):
self.hocr = None
self.xmlns = ''
self.boxPattern = re.compile('bbox((\s+\d+){4})')
if hocrFileName is not None:
self.parse_hocr(hocrFileName)
def __str__(self):
"""
Return the textual content of the HTML body
"""
if self.hocr is None:
return ''
body = self.hocr.find(".//%sbody"%(self.xmlns))
if body:
return self._get_element_text(body).encode('utf-8') # XML gives unicode
else:
return ''
def _get_element_text(self, element):
"""
Return the textual content of the element and its children
"""
text = ''
if element.text is not None:
text = text + element.text
for child in element.getchildren():
text = text + self._get_element_text(child)
if element.tail is not None:
text = text + element.tail
return text
def element_coordinates(self, element):
"""
Returns a tuple containing the coordinates of the bounding box around
an element
"""
out = (0,0,0,0)
if 'title' in element.attrib:
matches = self.boxPattern.search(element.attrib['title'])
if matches:
coords = matches.group(1).split()
out = (int(coords[0]),int(coords[1]),int(coords[2]),int(coords[3]))
return out
def parse_hocr(self, hocrFileName):
"""
Reads an XML/XHTML file into an ElementTree object
"""
self.hocr = ElementTree()
self.hocr.parse(hocrFileName)
# if the hOCR file has a namespace, ElementTree requires its use to find elements
matches = re.match('({.*})html', self.hocr.getroot().tag)
if matches:
self.xmlns = matches.group(1)
else:
self.xmlns = ''
def to_pdf(self, imageFileName, outFileName, fontname="Courier", fontsize=8):
"""
Creates a PDF file with an image superimposed on top of the text.
Text is positioned according to the bounding box of the lines in
the hOCR file.
The image need not be identical to the image used to create the hOCR file.
It can be scaled, have a lower resolution, different color mode, etc.
"""
if self.hocr is None:
# warn that no text will be embedded in the output PDF
print "Warning: No hOCR file specified. PDF will be image-only."
im = Image.open(imageFileName)
imwidthpx, imheightpx = im.size
if 'dpi' in im.info:
width = float(im.size[0])/im.info['dpi'][0]
height = float(im.size[1])/im.info['dpi'][1]
else:
# we have to make a reasonable guess
# set to None for now and try again using info from hOCR file
width = height = None
ocr_dpi = (300, 300) # a default, in case we can't find it
# get dimensions of the OCR, which may not match the image
if self.hocr is not None:
for div in self.hocr.findall(".//%sdiv"%(self.xmlns)):
if div.attrib['class'] == 'ocr_page':
coords = self.element_coordinates(div)
ocrwidth = coords[2]-coords[0]
ocrheight = coords[3]-coords[1]
if width is None:
# no dpi info with the image
# assume OCR was done at 300 dpi
width = ocrwidth/300
height = ocrheight/300
ocr_dpi = (ocrwidth/width, ocrheight/height)
break # there shouldn't be more than one, and if there is, we don't want it
if width is None:
# no dpi info with the image, and no help from the hOCR file either
      # this will probably end up looking awful, so issue a warning
print "Warning: DPI unavailable for image %s. Assuming 96 DPI."%(imageFileName)
width = float(im.size[0])/96
height = float(im.size[1])/96
# create the PDF file
pdf = Canvas(outFileName, pagesize=(width*inch, height*inch), pageCompression=1) # page size in points (1/72 in.)
# put the image on the page, scaled to fill the page
pdf.drawInlineImage(im, 0, 0, width=width*inch, height=height*inch)
if self.hocr is not None:
for line in self.hocr.findall(".//%sspan"%(self.xmlns)):
if line.attrib['class'] == 'ocr_line':
coords = self.element_coordinates(line)
text = pdf.beginText()
text.setFont(fontname, fontsize)
text.setTextRenderMode(3) # invisible
# set cursor to bottom left corner of line bbox (adjust for dpi)
text.setTextOrigin((float(coords[0])/ocr_dpi[0])*inch, (height*inch)-(float(coords[3])/ocr_dpi[1])*inch)
# scale the width of the text to fill the width of the line's bbox
text.setHorizScale((((float(coords[2])/ocr_dpi[0]*inch)-(float(coords[0])/ocr_dpi[0]*inch))/pdf.stringWidth(line.text.rstrip(), fontname, fontsize))*100)
# write the text to the page
text.textLine(line.text.rstrip())
pdf.drawText(text)
# finish up the page and save it
pdf.showPage()
pdf.save()
def to_text(self, outFileName):
"""
Writes the textual content of the hOCR body to a file.
"""
f = open(outFileName, "w")
f.write(self.__str__())
f.close()
if __name__ == "__main__":
if len(sys.argv) < 4:
print 'Usage: python HocrConverter.py inputHocrFile inputImageFile outputPdfFile'
sys.exit(1)
hocr = HocrConverter(sys.argv[1])
hocr.to_pdf(sys.argv[2], sys.argv[3])
|
fausthuang/faust-s-test-repo
|
hello world.py
|
Python
|
gpl-2.0
| 76
| 0.013158
|
print "hello"
print "world
|
"
print
|
"hello world"
print hello world
end
haha
|
INRA-LPGP/bio_tools
|
bio_tools/parameters/type_checks.py
|
Python
|
mit
| 1,406
| 0
|
import os
from distutils.util import strtobool
def is_int(value):
"""
Verifies that 'value' is an integer.
"""
try:
int(value)
except ValueError:
return False
else:
return True
def is_float(value):
"""
Verifies that 'value' is a float.
"""
try:
float(value)
except ValueError:
return False
else:
return True
def is_str(value):
"""
Verifies that 'value' is a string.
"""
if not type(value) is str:
return False
else:
return True
def is_bool(value):
"""
Verifies that 'value' is a boolean.
"""
try:
strtobool(value)
except ValueError:
return False
else:
return True
def is_dir(value):
"""
Verifies that 'value' is a path to an existing directory.
"""
if not (type(value) is str and os.path.isdir(value)):
return False
else:
return True
def is_file_i(value):
"
|
""
Verifies that 'value' is a path to an existing file.
"""
if not (type(value) is str and os.path.isfile(value)):
return False
else:
return True
def is_file_o(value):
"""
Verifies that 'value' is a path to a valid directory to create an output
file.
"""
    if not (type(value) is str and os.path.split(value)[0]):
return False
else:
return True
|
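Taken together, these checkers accept the string form of each type, which makes them handy for validating raw parameter files. A short usage sketch (the values are made up):

```python
# Illustrative checks; each helper returns True/False instead of raising.
assert is_int("42") and not is_int("4.2")  # int("4.2") raises ValueError -> False
assert is_float("4.2")
assert is_bool("yes")                      # strtobool accepts yes/no/true/false/0/1
assert is_str("anything")
print(is_dir("/tmp"))                      # True only if the directory exists
```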
jkonecny12/anaconda
|
docs/conf.py
|
Python
|
gpl-2.0
| 11,653
| 0.006522
|
# -*- coding: utf-8 -*-
#
# Anaconda documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 18 14:37:01 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, shutil
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# configuration required to import test modules
for path in ["../pyanaconda/isys/.libs", "../pyanaconda", "../tests", "../tests/lib", "../dracut", "../widgets"]:
sys.path.append(os.path.abspath(path))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
if not os.environ.get("READTHEDOCS") == "True":
extensions.extend(['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.todo'])
shutil.copy2("../CONTRIBUTING.rst", "contributing.rst")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Anaconda'
copyright = '2015, Red Hat, Inc.' # pylint: disable=redefined-builtin
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
def read_version():
""" Read version from ../configure.ac"""
import re
version_re = re.compile(r"AC_INIT\(\[(.*)\], \[(.*)\], \[(.*)\]\)")
with open("../configure.ac", "r") as f:
for line in f:
m = version_re.match(line)
if m:
return m.group(2)
# The short X.Y version.
version = read_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
autoclass_content = 'both'
# Inheritance diagram graphviz settings
inheritance_graph_attrs = dict(rankdir="UD", fontsize=14, ratio='auto')
inheritance_node_attrs = dict(style='rounded', margin='"0.07, 0.07"')
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Anacondadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Anaconda.tex', 'Anaconda Documentation',
'Anaconda Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Anaconda', 'Anaconda Documentation',
['Anaconda Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------
|
AlexandreDecan/Dashbird
|
widgets/schoolwidget/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 2,474
| 0.005255
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='MissingTeacherEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('hour', models.PositiveIntegerField(verbose_name='Heure de cours')),
('content', models.TextField(verbose_name='contenu', blank=True)),
('visible', models.BooleanField(default=True, verbose_name='visible ?')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='date de cr\xe9ation')),
('modified', models.DateTimeField(auto_now=True, verbose_name='date de modification')),
],
options={
'verbose_name': 'heure de cours',
'verbose_name_plural': 'heures de cours',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MissingTeacherWidget',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text="Le nom est obligatoire et permet d'identifier votre widget facilement.", max_length=100, verbose_name='nom')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='date de cr\xe9ation')),
('modified', models.DateTimeField(auto_now=True, verbose_name='date de modification')),
('missing', models.TextField(help_text='Un enseignant par ligne.', verbose_name='Enseignants absents', blank=True)),
                ('hide_empty', models.BooleanField(default=True, help_text="Masque les heures de cours pour lesquelles aucune information n'a \xe9t\xe9 entr\xe9e.", verbose_name='Cacher les \xe9l\xe9ments vides')),
],
options={
'verbose_name': 'enseignant absent',
'verbose_name_plural': 'enseignants absents',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='missingteacherentry',
name='widget',
field=models.ForeignKey(related_name='hours', to='schoolwidget.MissingTeacherWidget'),
preserve_default=True,
),
]
|
google/CFU-Playground
|
proj/hps_accel/gateware/gen1/set.py
|
Python
|
apache-2.0
| 4,133
| 0
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from amaranth import Signal, unsigned
from amaranth.hdl.dsl import Module
from amaranth_cfu import InstructionBase, SimpleElaboratable
from .constants import Constants
from ..stream import Endpoint, connect
class ConfigurationRegister(SimpleElaboratable):
"""A register set by the CPU.
    Allows a CPU to provide data to gateware.
Attributes
----------
output: Endpoint(unsigned(32))
        An output stream of values. A new value is placed onto the stream
        whenever the register is set.
value: unsigned(32), out
The value held by the register.
new_en: Signal(1), in
        Indicates to the register that a new value is being presented on new_value.
new_value: unsigned(32), in
New value for the register. Read when "set" is asserted.
"""
def __init__(self):
super().__init__()
self.output = Endpoint(unsigned(32))
self.value = self.output.payload
self.new_en = Signal()
self.new_value = Signal(32)
def elab(self, m):
with m.If(self.output.is_transferring()):
m.d.sync += self.output.valid.eq(0)
with m.If(self.new_en):
m.d.sync += self.value.eq(self.new_value)
m.d.sync += self.output.valid.eq(1)
class SetInstruction(InstructionBase):
"""An instruction used to set values into a register of the CFU.
Sets a configuration register from in0.
Attributes
----------
output_streams: dict[id, Endpoint[unsigned(32)]], out
Value output for each register.
values: dict[id, unsigned(32)], out
Values as set into registers.
write_strobes: dict[id, Signal(1)], out
Asserted for one cycle when the corresponding register id is written.
"""
# The list of all register IDs that may be set
REGISTER_IDS = [
Constants.REG_FILTER_NUM_WORDS,
Constants.REG_INPUT_NUM_WORDS,
Constants.REG_INPUT_OFFSET,
Constants.REG_SET_FILTER,
Constants.REG_SET_INPUT,
Constants.REG_OUTPUT_OFFSET,
Constants.REG_OUTPUT_MIN,
Constants.REG_OUTPUT_MAX,
Constants.REG_FILTER_INPUT_NEXT,
Constants.REG_VERIFY,
Constants.REG_OUTPUT_PARAMS_RESET,
Constants.REG_OUTPUT_BIAS,
Constants.REG_OUTPUT_MULTIPLIER,
Constants.REG_OUTPUT_SHIFT,
]
def __init__(self):
super().__init__()
self.output_streams = {
i: Endpoint(
unsigned(32)) for i in self.REGISTER_IDS}
self.values = {i: Signal(32) for i in self.REGISTER_IDS}
self.write_strobes = {i: Signal(1) for i in self.REGISTER_IDS}
def elab(self, m: Module):
registers = {i: ConfigurationRegister() for i in self.REGISTER_IDS}
for i, register in registers.items():
m.submodules[f"reg_{i:02x}"] = register
m.d.comb += connect(register.output, self.output_streams[i])
m.d.comb += self.values[i].eq(register.value)
m.d.comb += self.write_strobes[i].eq(0) # strobes off by default
with m.If(self.start):
# Consider making self.done.eq(1) combinatorial
m.d.sync += self.done.eq(1)
with m.Switch(self.funct7):
for i, register in registers.items():
with m.Case(i):
m.d.comb += register.new_en.eq(1)
m.d.comb += register.new_value.eq(self.in0)
m.d.comb += self.write_strobes[i].eq(1)
with m.Else():
m.d.sync += self.done.eq(0)
|
morelab/weblabdeusto
|
server/src/weblab/core/coordinator/checker.py
|
Python
|
bsd-2-clause
| 5,307
| 0.006785
|
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from __future__ import print_function, unicode_literals
import traceback
import voodoo.log as log
from voodoo.gen import CoordAddress
class ResourcesChecker(object):
def __init__(self, coordinator):
self.coordinator = coordinator
self.locator = coordinator.locator
self.current_lab = None
def check(self):
try:
experiments_per_laboratory = self.coordinator.list_laboratories_addresses()
            # Use a common broken_resources to avoid endless loops if a resource is registered
            # in more than one laboratory server (one might state that it works while
            # another might state that it doesn't).
broken_resources = {}
for laboratory_address_str in experiments_per_laboratory:
self.current_lab = laboratory_address_str
new_broken_resources = self.check_laboratory(laboratory_address_str, experiments_per_laboratory[laboratory_address_str])
for broken_resource in new_broken_resources:
if broken_resource in broken_resources:
broken_resources[broken_resource] += ';' + new_broken_resources[broken_resource]
else:
broken_resources[broken_resource] = new_broken_resources[broken_resource]
all_notifications = {
# (recipient1, recipient2) : [message1, message2, message3],
# (recipient1, ) : [message4, message5],
# (recipient3, ) : [message6, message7],
}
for laboratory_address_str in experiments_per_laboratory:
experiments = experiments_per_laboratory[laboratory_address_str]
for experiment in experiments:
laboratory_resource = experiments[experiment]
                    if laboratory_resource in broken_resources:
notifications = self.coordinator.mark_resource_as_broken(laboratory_resource, broken_resources[laboratory_resource])
else:
notifications = self.coordinator.mark_resource_as_fixed(laboratory_resource)
for recipients in notifications:
if recipients in all_notifications:
all_notifications[recipients].extend(notifications[recipients])
else:
all_notifications[recipients] = list(notifications[recipients])
if all_notifications:
self.coordinator.notify_status(all_notifications)
except:
traceback.print_exc()
log.log( ResourcesChecker, log.level.Critical,
"Error checking resources.")
log.log_exc(ResourcesChecker, log.level.Critical)
def check_laboratory(self, address_str, experiments):
""" Checks in that laboratory address which experiments are broken and which ones are working.
:param address_str: laboratory address, e.g. "laboratory:general_laboratory@server1"
:param experiments: dictionary of experiments: resources, e.g. { "exp1|ud-fpga|FPGA experiments" : "fpga1@fpga boards"}
"""
broken_resources = {
# resource_id : error_message
}
try:
address = CoordAddress.translate(address_str)
server = self.locator.get(address, timeout=1800) # Extended timeout for this method
failing_experiments = server.check_experiments_resources()
#
# failing_experiments is a dictionary such as:
# {
# experiment_instance_id : error_message
# }
#
for failing_experiment in failing_experiments:
if not failing_experiment in experiments:
log.log( ResourcesChecker, log.level.Error,
"Laboratory server %s reported that experiment %s was failing; however this laboratory does NOT manage this experiment. Attack?" % (address_str, failing_experiment))
continue
#
# The error for a resource will be concatenated
#
broken_resource = experiments[failing_experiment]
error_message = failing_experiments[failing_experiment]
if broken_resource in broken_resources:
broken_resources[broken_resource] = broken_resources[broken_resource] + ';' + error_message
else:
broken_resources[broken_resource] = error_message
except:
traceback.print_exc()
log.log( ResourcesChecker, log.level.Critical,
"Error checking resources of laboratory %s " % address_str)
log.log_exc(ResourcesChecker, log.level.Critical)
return broken_resources
|
cactusbin/nyt
|
matplotlib/examples/event_handling/data_browser.py
|
Python
|
unlicense
| 2,233
| 0.011196
|
import numpy as np
class PointBrowser:
"""
Click on a point to select and highlight it -- the data that
generated the point will be shown in the lower axes. Use the 'n'
and 'p' keys to browse through the next and previous points
"""
def __init__(self):
self.lastind = 0
self.text = ax.text(0.05, 0.95, 'selected: none',
transform=ax.transAxes, va='top')
self.selected, = ax.plot([xs[0]], [ys[0]], 'o', ms=12, alpha=0.4,
color='yellow', visible=False)
def onpress(self, event):
if self.lastind is None: return
if event.key not in ('n', 'p'): return
if event.key=='n': inc = 1
else: inc = -1
self.lastind += inc
self.lastind = np.clip(self.lastind, 0, len(xs)-1)
self.update()
def onpick(self, event):
if event.artist!=line: return True
N = len(event.ind)
if not N: return True
# the click locations
x = event.mouseevent.xdata
y = event.mouseevent.ydata
distances = np.hypot(x-xs[event.ind], y-ys[event.ind])
indmin = distances.argmin()
dataind = event.ind[indmin]
self.lastind = dataind
self.update()
def update(self):
if self.lastind is None: return
dataind = self.lastind
ax2.cla()
ax2.plot(X[dataind])
ax2.text(0.05, 0.9, 'mu=%1.3f\nsigma=%1.3f'%(xs[dataind], ys[dataind]),
transform=ax2.transAxes, va='top')
ax2.set_ylim(-0.5, 1.5)
self.selected.set_visible(True)
self.selected.set_data(xs[dataind], ys[dataind])
self.text.set_text('selected: %d'%dataind)
fig.canvas.draw()
if __name__ == '__main__':
import matplotlib.pyplot as plt
X = np.random.rand(100, 200)
xs = np.mean(X, axis=1)
ys = np.std(X, axis=1)
fig, (ax, ax2) = plt.subplots(2, 1)
ax.set_title('click on point to plot time series')
line, = ax.plot(xs, ys, 'o', picker=5) # 5 points tolerance
browser = PointBrowser()
fig.canvas.mpl_connect('pick_event', browser.onpick)
    fig.canvas.mpl_connect('key_press_event', browser.onpress)
plt.show()
|
facelessuser/sublime-markdown-popups
|
st3/mdpopups/pygments/lexers/pascal.py
|
Python
|
mit
| 32,536
| 0.001014
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.pascal
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Pascal family languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from ..lexer import Lexer, RegexLexer, include, bygroups, words, \
using, this, default
from ..util import get_bool_opt, get_list_opt
from ..token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
from ..scanner import Scanner
from .modula2 import Modula2Lexer
__all__ = ['DelphiLexer', 'AdaLexer']
class DelphiLexer(Lexer):
"""
For `Delphi <http://www.borland.com/delphi/>`_ (Borland Object Pascal),
Turbo Pascal and Free Pascal source code.
Additional options accepted:
`turbopascal`
Highlight Turbo Pascal specific keywords (default: ``True``).
`delphi`
Highlight Borland Delphi specific keywords (default: ``True``).
`freepascal`
Highlight Free Pascal specific keywords (default: ``True``).
`units`
A list of units that should be considered builtin, supported are
``System``, ``SysUtils``, ``Classes`` and ``Math``.
Default is to consider all of them builtin.
"""
name = 'Delphi'
aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
filenames = ['*.pas']
mimetypes = ['text/x-pascal']
TURBO_PASCAL_KEYWORDS = (
'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
'const', 'constructor', 'continue', 'destructor', 'div', 'do',
'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
)
DELPHI_KEYWORDS = (
'as', 'class', 'except', 'exports', 'finalization', 'finally',
'initialization', 'is', 'library', 'on', 'property', 'raise',
'threadvar', 'try'
)
FREE_PASCAL_KEYWORDS = (
'dispose', 'exit', 'false', 'new', 'true'
)
BLOCK_KEYWORDS = set((
'begin', 'class', 'const', 'constructor', 'destructor', 'end',
'finalization', 'function', 'implementation', 'initialization',
'label', 'library', 'operator', 'procedure', 'program', 'property',
'record', 'threadvar', 'type', 'unit', 'uses', 'var'
))
FUNCTION_MODIFIERS = set((
'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
'override', 'assembler'
))
# XXX: those aren't global. but currently we know no way for defining
# them just for the type context.
DIRECTIVES = set((
'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
'published', 'public'
))
BUILTIN_TYPES = set((
'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
'widechar', 'widestring', 'word', 'wordbool'
))
BUILTIN_UNITS = {
'System': (
'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
'append', 'arctan', 'assert', 'assigned', 'assignfile',
'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
'dispose', 'doubletocomp', 'endthread', 'enummodules',
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
'findresourcehinstance', 'flush', 'frac', 'freemem',
'get8087cw', 'getdir', 'getlasterror', 'getmem',
'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
            'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
'randomize', 'read', 'readln', 'reallocmem',
            'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
'set8087cw', 'setlength', 'setlinebreakstyle',
'setmemorymanager', 'setstring', 'settextbuf',
'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
'utf8tounicode', 'val', 'vararrayredim', 'varclear',
'widecharlentostring', 'widecharlentostrvar',
'widechartostring', 'widechartostrvar',
'widestringtoucs4string', 'write', 'writeln'
),
'SysUtils': (
'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
'allocmem', 'ansicomparefilename', 'ansicomparestr',
'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
'ansistrscan', 'ansistrupper', 'ansiuppercase',
'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
'callterminateprocs', 'changefileext', 'charlength',
'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
'comparetext', 'createdir', 'createguid', 'currentyear',
'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
'exceptionerrormessage', 'excludetrailingbackslash',
'excludetrailingpathdelimiter', 'expandfilename',
'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
'extractfiledrive', 'extractfileext', 'extractfilename',
'extractfilepath', 'extractrelativepath', 'extractshortpathname',
'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
'fileopen', 'fileread', 'filesearch', 'fileseek', 'fil
|
vileopratama/vitech
|
docs/tutorials/ebook/Odoo Development Cookbook/OdooDevelopmentCookbook_Code/Chapter06_code/Ch06_R05/some_model_ch06r05/models.py
|
Python
|
mit
| 913
| 0
|
# coding: utf-8
from openerp import models, api, fields
class LibraryReturnsWizard(models.TransientModel):
_name = 'library.returns.wizard'
member_id = fields.Many2one('library.member', 'Member')
    book_ids = fields.Many2many('library.book', 'Books')
@api.multi
def record_returns(self):
loan = self.env['library.book.loan']
for rec in self:
loans = loan.search(
[('state', '=', 'ongoing'),
('book_id', 'in', self.book_ids.ids),
('member_id', '=', self.member_id.id)]
)
loans.write({'state': 'done'})
@api.onchange('member_id')
def onchange_member(self):
loan = self.env['library.book.loan']
loans = loan.search(
[('state', '=', 'ongoing'),
('member_id', '=', self.member_id.id)]
)
self.book_ids = loans.mapped('book_id')
|
RexChenjq/AWS-Image-Backup
|
purge latest.py
|
Python
|
mit
| 2,667
| 0.009374
|
import boto3
import collections
import datetime
import time
import sys
import botocore
ec2_client = boto3.client('ec2',region_name='ap-southeast-2')
ec2_resource = boto3.resource('ec2',region_name='ap-southeast-2')
sns = boto3.client('sns')
images_all = ec2_resource.images.filter(Owners=["self"])
today_fmt = (datetime.datetime.now()+ datetime.timedelta(hours=11)).strftime('%Y-%m-%d')
today_date = time.strptime(today_fmt, '%Y-%m-%d')
def lambda_handler(event, context):
try:
images_to_remove=[]
toremoveimagecount = 0
snapshotcount = 0
totalimagecount=0
for image in images_all:
if (image.name.startswith('Lambda-')):
totalimagecount+=1
try:
if image.tags is not None:
deletion_date = [
t.get('Value') for t in image.tags
                        if t['Key'] == 'DeleteOn'][0]
delete_date = time.strptime(deletion_date, "%Y-%m-%d")
except IndexError:
deletion_date = False
delete_date = False
if delete_date <= today_date:
images_to_remove.append(image.id)
print "============="
print "About to deregister the following AMIs:"
print images_to_remove
snapshots = ec2_client.describe_snapshots(OwnerIds=["self"])['Snapshots']
for image in images_to_remove:
toremoveimagecount += 1
print "deregistering image %s" % image
amiResponse = ec2_client.deregister_image(
DryRun=False,
ImageId=image,
)
for snapshot in snapshots:
if snapshot['Description'].find(image) > 0:
snapshotcount += 1
snap = ec2_client.delete_snapshot(SnapshotId=snapshot['SnapshotId'])
print "Deleting snapshot " + snapshot['SnapshotId']
print "-------------"
result = "Deleted %d AMIs and %d corresponding snapshots" %(toremoveimagecount,snapshotcount)
print result
response = sns.publish(
TopicArn='arn:aws:sns:ap-southeast-2:352138128272:lambda_ami_backup',
Message= result,
Subject='Purge Success')
#SNS email
except botocore.exceptions.ClientError as e:
result = e.response['Error']['Message']
print result
response = sns.publish(
TopicArn='arn:aws:sns:ap-southeast-2:352138128272:lambda_ami_backup',
Message= result,
Subject='Purge Failed')
#SNS email
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/module_utils/facts/network/dragonfly.py
|
Python
|
bsd-3-clause
| 1,202
| 0.000832
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class DragonFlyNetwork(GenericBsdIfconfigNetwork):
"""
This is the DragonFly Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'DragonFly'
class DragonFlyNetworkCollector(NetworkCollector):
_fact_class = DragonFlyNetwork
_platform = 'DragonFly'
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/nbconvert/preprocessors/convertfigures.py
|
Python
|
bsd-2-clause
| 1,450
| 0.002759
|
"""Module containing a preprocessor
|
that converts outputs in the notebook from
one format to another.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from .base import Preprocessor
from traitlets import Unicode
class ConvertFiguresPreprocessor(Preprocessor):
"""
Converts all of the outputs in a notebook from one format to another.
"""
from_format = Unicode(help='Format the converter accepts').tag(config=True)
to_format = Unicode(help='Format the converter writes').tag(config=True)
def __init__(self, **kw):
"""
Public constructor
"""
super(ConvertFiguresPreprocessor, self).__init__(**kw)
def convert_figure(self, data_format, data):
raise NotImplementedError()
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each cell,
See base.py
"""
# Loop through all of the datatypes of the outputs in the cell.
for output in cell.get('outputs', []):
if output.output_type in {'execute_result', 'display_data'} \
and self.from_format in output.data \
and self.to_format not in output.data:
output.data[self.to_format] = self.convert_figure(
self.from_format, output.data[self.from_format])
return cell, resources
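# Editor's note: a minimal sketch of a concrete subclass wiring up the
# convert_figure() hook (the class name and conversion body are invented for
# illustration; nbconvert's real subclasses convert e.g. SVG output to PDF).
class UppercaseTextPreprocessor(ConvertFiguresPreprocessor):
    from_format = Unicode('text/plain').tag(config=True)
    to_format = Unicode('text/vnd.upper').tag(config=True)

    def convert_figure(self, data_format, data):
        # trivially "convert" by upper-casing the text payload
        return data.upper()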
|
dianvaltodorov/happy-commas
|
db_create.py
|
Python
|
mit
| 566
| 0
|
#!env/bin/python
"""Create the database"""
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
    api.version_control(SQLALCHEMY_DATABASE_URI,
                        SQLALCHEMY_MIGRATE_REPO,
                        api.version(SQLALCHEMY_MIGRATE_REPO))
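# Editor's note: after this bootstrap, the repository and database versions
# can be inspected with the same API (a sketch using sqlalchemy-migrate's
# documented entry points):
print(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
print(api.version(SQLALCHEMY_MIGRATE_REPO))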
|
jittat/adm2
|
application/views/update.py
|
Python
|
agpl-3.0
| 12,858
| 0.003085
|
# -*- coding: utf-8 -*-
import datetime
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.conf import settings
from django import forms
from commons.decorators import submitted_applicant_required
from commons.decorators import within_submission_deadline
from commons.utils import submission_deadline_passed, redirect_to_deadline_error, validate_phone_number
from application.views.form_views import prepare_major_form
from application.forms.handlers import handle_major_form
from application.forms.handlers import assign_major_pref_to_applicant
from application.forms.handlers import handle_education_form
from application.forms.handlers import handle_address_form
from application.forms.handlers import handle_personal_info_form
from application.forms import EducationForm, SingleMajorPreferenceForm
from application.models import Applicant, MajorPreference, Major, PersonalInfo
from commons.models import Log
from commons.local import APP_TITLE_FORM_CHOICES
from commons.email import send_sub_method_change_notice_by_email
from application.forms.widgets import ThaiSelectDateWidget
def update_major_single_choice(request):
applicant = request.applicant
if request.method == 'POST':
if 'cancel' not in request.POST:
form = SingleMajorPreferenceForm(request.POST)
if form.is_valid():
assign_major_pref_to_applicant(applicant,
[form.cleaned_data['major'].number])
request.session['notice'] = 'การแก้ไขอันดับสาขาวิชาเรียบร้อย'
return HttpResponseRedirect(reverse('status-index'))
else:
request.session['notice'] = 'อันดับสาขาวิชาไม่ถูกแก้ไข'
return HttpResponseRedirect(reverse('status-index'))
else:
if applicant.has_major_preference():
pref = applicant.preference.majors
if len(pref)==0:
prev_major = None
else:
majors = dict([(int(m.number), m) for m in Major.get_all_majors()])
prev_major = majors[pref[0]]
form = SingleMajorPreferenceForm(initial={'major': prev_major.id})
# add step info
form_data = {}
form_data['step_name'] = 'แก้ไขอันดับสาขาวิชา'
form_data['can_log_out'] = True
form_data['form'] = form
return render_to_response('application/update/majors_single.html',
form_data)
def update_majors_as_major_lists(request):
"""
WARNINGS:
Unused in the current version.
This is for the case when the number of choices is small.
"""
if settings.MAX_MAJOR_RANK == 1:
return update_major_single_choice(request)
applicant = request.applicant
if request.method == 'POST':
if 'cancel' not in request.POST:
result, major_list, errors = handle_major_form(request)
if result:
request.session['notice'] = 'การแก้ไขอันดับสาขาวิชาเรียบร้อย'
return HttpResponseRedirect(reverse('status-index'))
else:
request.session['notice'] = 'อันดับสาขาวิชาไม่ถูกแก้ไข'
return HttpResponseRedirect(reverse('status-index'))
pref_ranks = MajorPreference.major_list_to_major_rank_list(major_list)
form_data = prepare_major_form(applicant, pref_ranks, errors)
else:
if applicant.has_major_preference():
pref_ranks = applicant.preference.to_major_rank_list()
else:
            pref_ranks = [None] * len(Major.get_all_majors())
form_data = prepare_major_form(applicant, pref_ranks)
# add step info
form_data['step_name'] = 'แก้ไขอันดับสาขาวิชา'
form_data['can_log_out'] = True
return render_to_response('application/update/majors.html',
form_data)
@within_submission_deadline
@submitted_applicant_required
def update_majors(request):
max_major_rank = settings.MAX_MAJOR_RANK
if max_major_rank == 1:
return update_major_single_choice(request)
from form_views import prepare_major_selections
applicant = request.applicant
form_data = { 'majors': Major.get_all_majors() }
if request.method == 'POST':
if 'cancel' not in request.POST:
result, pref_list, errors = handle_major_form(request,
max_major_rank)
log = Log.create("Update major pref: %s from %s" %
(applicant.id,request.META['REMOTE_ADDR']))
if result:
request.session['notice'] = 'การแก้ไขอันดับสาขาวิชาเรียบร้อย'
return HttpResponseRedirect(reverse('status-index'))
selections = prepare_major_selections(pref_list, max_major_rank)
form_data['errors'] = errors
else:
request.session['notice'] = 'อันดับสาขาวิชาไม่ถูกแก้ไข'
return HttpResponseRedirect(reverse('status-index'))
else:
if applicant.has_major_preference():
pref_list = applicant.preference.get_major_list()
else:
pref_list = [None] * max_major_rank
selections = prepare_major_selections(pref_list, max_major_rank)
# add step info
form_data['step_name'] = 'แก้ไขอันดับสาขาวิชา'
form_data['can_log_out'] = True
form_data['selections'] = selections
form_data['max_major_rank'] = max_major_rank
return render_to_response('application/update/majors.html',
form_data)
@within_submission_deadline
@submitted_applicant_required
def update_education(request):
applicant = request.applicant
old_education = applicant.get_educational_info_or_none()
result, form = handle_education_form(request, old_education)
if result:
request.session['notice'] = 'การแก้ไขข้อมูลการศึกษาเรียบร้อย'
return HttpResponseRedirect(reverse('status-index'))
elif 'cancel' in request.POST:
request.session['notice'] = 'ข้อมูลการศึกษาไม่ถูกแก้ไข'
return HttpResponseRedirect(reverse('status-index'))
return render_to_response('application/update/education.html',
{'form': form,
'can_log_out': True,
'applicant': applicant })
@within_submission_deadline
@submitted_applicant_required
def update_address(request):
applicant = request.applicant
if not applicant.has_address():
return HttpResponseForbidden()
result, hform, cform = handle_address_form(request)
if result:
request.session['notice'] = 'การแก้ไขที่อยู่เรียบร้อย'
return HttpResponseRedirect(reverse('status-index'))
elif 'cancel' in request.POST:
request.session['notice'] = 'ข้อมูลที่อยู่ไม่ถูกแก้ไข'
return HttpResponseRedirect(reverse('status-index'))
return render_to_response('application/update/address.html',
{ 'home_address_form': hform,
'contact_address_form': cform,
'can_log_out': True,
'applicant': applicant })
THIS_YEAR = datetime.date.today().year
APPLICANT_BIRTH_YEARS = range(THIS_YEAR-30,THIS_YEAR-10)
class PersonalInfoWithFullnameForm(forms.Form):
title = forms.ChoiceField(choices=APP_TITLE_FORM_CHOICES)
first_name = forms.CharField(label=u'ชื่อ')
last_name = forms.CharField(label=u'นามสกุล')
birth_date = forms.DateField(
widget=ThaiSelectDateWidget(years=APPLICANT_BIRTH_YEARS),
label=u"วันเกิด")
phone_number = forms.CharField(label=u'หมายเลขโทรศัพท์')
    nationality = forms.CharField(label="สัญชาติ")
ethnicity = forms.CharField(label="เชื้อชาติ")
def clean_phone_number(self):
if not validate_phone_number(self.cleaned_data['phone_number']):
raise forms.ValidationError("หมายเลขโทรศัพท์ไม่ถูกต้อง")
return self.cleaned_data['phone_number']
@within_submission_deadline
@submitted_applicant_required
def update_personal_info(request):
    applicant = request.applicant
|
jokkebk/euler
|
p144.py
|
Python
|
mit
| 538
| 0.013011
|
inside = lambda x, y: 4*x*x+y*y <= 100
def coll(sx, sy, dx, dy):
m = 0
for p in range(32):
m2 = m + 2**(-p)
if inside(sx + dx * m2, sy + dy * m2): m = m2
return (sx + dx*m, sy + dy*m)
def norm(x, y):
l = (x*x + y*y)**0.5
return (x/l, y/l)
sx, sy = 0, 10.1
dx, dy = 1.4, -19.7
for I in range(999):
sx, sy = coll(sx, sy, dx, dy)
if sy > 0 and abs(sx) <= 0.01:
print(I)
break
mx, my = norm(1, -4*sx/sy)
d = mx*dx + my*dy
    dx, dy = -dx + 2 * mx * d, -dy + 2 * my * d
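# Editor's note: the update above is reflection about the tangent line of the
# ellipse 4x^2 + y^2 = 100 at the hit point. With unit tangent t = (mx, my)
# (proportional to (1, -4x/y)) and incoming direction v = (dx, dy), the
# reflected direction is v' = 2*dot(v, t)*t - v, which is exactly
# dx, dy = -dx + 2*mx*d, -dy + 2*my*d with d = mx*dx + my*dy.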
|
lukas/ml-class
|
examples/keras-transfer/resnet50-inspect.py
|
Python
|
gpl-2.0
| 517
| 0.001934
|
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
model = ResNet50(weights='imagenet')
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
model.summary()
preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
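# Editor's note: decode_predictions returns, per input image, a list of
# (imagenet_id, label, probability) triples, so the same result can be
# walked explicitly (a sketch):
for imagenet_id, label, prob in decode_predictions(preds, top=3)[0]:
    print('%s (%s): %.4f' % (label, imagenet_id, prob))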
|
abutcher/openshift-ansible
|
roles/lib_openshift/src/lib/base.py
|
Python
|
apache-2.0
| 21,696
| 0.001244
|
# pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
updated = False
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
updated = True
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if results['changed']:
updated = True
if updated:
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
            yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-p')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None, field_selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
if field_selector is not None:
cmd.append('--field-selector={}'.format(field_selector))
# Name cannot be used with selector or field_selector.
if selector is None and field_selector is None and name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node scheduable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
|
abhishekgahlot/unirest-python
|
setup.py
|
Python
|
mit
| 431
| 0
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='Unirest',
version='1.1.7',
author='Mashape',
author_email='opensource@mashape.com',
packages=['unirest'],
url='https://github.com/Mashape/unirest-python',
license='LICENSE',
description='Simplified, lightweight HTTP client library',
install_requires=[
"poster >= 0.8.1"
]
)
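# Editor's note: once installed, the client reads roughly as below (based on
# the project's README of that era; keyword names may differ between
# releases, so treat this as a sketch):
#
# import unirest
# response = unirest.get("http://httpbin.org/get",
#                        headers={"Accept": "application/json"})
# print response.code, response.body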
| |
birdland/dlkit-doc
|
dlkit/mongo/osid/summary_doc.py
|
Python
|
mit
| 17,271
| 0
|
# -*- coding: utf-8 -*-
"""Core Service Interface Definitions
osid version 3.0.0
The Open Service Interface Definitions (OSIDs) is a service-based
architecture to promote software interoperability. The OSIDs are a large
suite of interface contract specifications that describe the integration
points among services and system components for the purpose of creating
choice among a variety of different and independently developed
applications and systems, allowing independent evolution of software
components within a complex system, and federated service providers.
The OSIDs were initially developed in 2001 as part of the MIT Open
Knowledge Initiative Project funded by the Andrew W. Mellon Foundation
to provide an architecture for higher education learning systems. OSID
3K development began in 2006 to redesign the capabilities of the
specifications to apply to a much broader range of service domains and
integration challenges among both small and large-scale enterprise
systems.
The ``osid`` package defines the building blocks for the OSIDs which are
defined in packages for their respective services. This package defines
the top-level interfaces used by all the OSIDs as well as specification
metadata and the OSID Runtime interface.
Meta Interfaces and Enumerations
* ``OSID:`` an enumeration listing the OSIDs defined in the
specification.
* ``Syntax:`` an enumeration listing primitive types
* ``Metadata:`` an interface for describing data constraints on a data
element
Interface Behavioral Markers
Interface behavioral markers are used to tag a behavioral pattern of the
interface used to construct other object interfaces.
* ``OsidPrimitive:`` marks an OSID interface used as a primitive. OSID
  primitives may take the form of interfaces if not bound to a language
primitive. Interfaces used as primitives are marked to indicate that
the underlying objects may be constructed by an OSID Consumer and an
OSID Provider must honor any OSID primitive regardless of its
origin.
* ``Identifiable:`` Marks an interface identifiable by an OSID ``Id.``
* ``Extensible:`` Marks an interface as extensible through
``OsidRecords.``
* ``Browsable:`` Marks an interface as providing ``Property``
inspection for its ``OsidRecords.``
* ``Suppliable:`` Marks an interface as accepting data from an OSID
Consumer.
* ``Temporal:`` Marks an interface that has a lifetime with begin and
end dates.
* ``Subjugateable:`` Marks an interface that is dependent on another
object.
* ``Aggregateable:`` Marks an interface that contains other objects
normally related through other services.
* ``Containable:`` Marks an interface that contains a recursive
reference to itself.
* ``Sourceable:`` Marks an interface as having a provider.
* ``Federateable:`` Marks an interface that can be federated using the
OSID Hierarchy pattern.
* ``Operable:`` Marks an interface as responsible for performing
  operations or tasks. ``Operables`` may be enabled or disabled.
Abstract service Interfaces
* ``OsidProfile:`` Defines interoperability methods used by
OsidManagers.
* ``OsidManager:`` The entry point into an OSID and provides access to
``OsidSessions.``
* ``OsidProxyManager:`` Another entry point into an OSID providing a
means for proxying data from a middle tier application server to an
underlying OSID Provider.
* ``OsidSession`` : A service interface accessible from an
``OsidManager`` that defines a set of methods for an aspect of a
service.
Object-like interfaces are generally defined along lines of
interoperability separating issues of data access from data management
and searching. These interfaces may also implement any of the abstract
behavioral interfaces listed above. The OSIDs do not adhere to a DAO/DTO
model in its service definitions in that there are service methods
defined on the objects (although they can be implemented using DTOs if
desired). For the sake of an outline, we'll pretend they are data
objects.
* ``OsidObject:`` Defines object data. ``OsidObjects`` are accessed
from ``OsidSessions.`` ``OsidObjects`` are part of an interface
hierarchy whose interfaces include the behavioral markers and a
variety of common ``OsidObjects.`` All ``OsidObjects`` are
``Identifiable,`` ``Extensible,`` and have a ``Type.`` There are
several variants of ``OsidObjects`` that indicate a more precise
behavior.
* ``OsidObjectQuery:`` Defines a set of methods to query an OSID for
its ``OsidObjects`` . An ``OsidQuery`` is accessed from an
``OsidSession.``
* ``OsidObjectQueryInspector:`` Defines a set of methods to examine an
``OsidQuery.``
* ``OsidObjectForm:`` Defines a set of methods to create and update
data. ``OsidForms`` are accessed from ``OsidSessions.``
* ``OsidObjectSearchOrder:`` Defines a set of methods to order search
results. ``OsidSearchOrders`` are accessed from ``OsidSessions.``
Most objects are or are derived from ``OsidObjects``. Some object
interfaces may not implement ``OsidObject`` but instead derive directly
from interface behavioral markers. Other ``OsidObjects`` may include
interface behavioral markers to indicate functionality beyond a plain
object. Several categories of ``OsidObjects`` have been defined to
cluster behaviors to semantically distinguish their function in the
OSIDs.
* ``OsidCatalog:`` At the basic level, a catalog represents a
collection of other ``OsidObjects.`` The collection may be physical
or virtual and may be federated to build larger ``OsidCatalogs``
using hierarchy services. ``OsidCatalogs`` may serve as a control
point to filter or constrain the ``OsidObjects`` that may be visible
   or created. Each ``OsidCatalog`` may have its own provider identity
apart from the service provider.
* ``OsidRelationship:`` Relates two ``OsidObjects.`` The
``OsidRelationship`` represents the edge in a graph that may have
its own relationship type and data. ``OsidRelationships`` are
``Temporal`` in that they have a time in which the relationship came
into being and a time when the relationship ends.
* ``OsidRule:`` Defines an injection point for logic. An ``OsidRule``
may represent some constraint, evaluation, or execution. While
authoring of ``OsidRules`` is outside the scope of the OSIDs, an
  ``OsidRule`` provides the means to identify the rule and map it to
certain ``OsidObjects`` to effect behavior of a service.
The most basic operations of an OSID center on retrieval, search, create
& update, and notifications on changes to an ``OsidObject``. The more
advanced OSIDs model a system behavior where a variety of implicit
relationships, constraints and rules come into play.
* ``OsidGovernator:`` Implies an activity or operation exists in the
OSID Provider acting as an ``Operable`` point for a set of rules
governing related ``OsidObjects.`` The ``OsidGovernator`` represents
an engine of sorts in an OSID Provider and may have its own provider
identity.
* ``OsidCompendium`` : ``OsidObjects`` which are reports or summaries
based on transactional data managed elsewhere.
Managing data governing rules occurs in a separate set of interfaces
from the effected ``OsidObjects`` (and often in a separate package).
This allows for a normalized set of rules managing a small set of
control points in a potentially large service.
* ``OsidEnabler:`` A managed control point to enable or disable the
operation or effectiveness of another ``OsidObject`` . Enablers
create a dynamic environment where behaviors and relationships can
  come and go based on rule evaluations.
* ``OsidConstrainer:`` A managed control point to configure the
constraints on the behavior of another ``OsidObject.``
* ``OsidProcessor:`` A managed control point to configure the behavior
  of another ``OsidObject`` where some kind of processing is implied.
Other Abstract Interfaces
* ``OsidSearch:`` Defines set of methods to manage search options for
performing searches.
* ``OsidSearchResults:`` Defines a set of method
|
jmorenobl/django-template-project-1.8
|
project_name_project/config/settings/base.py
|
Python
|
mit
| 9,481
| 0.004324
|
"""
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
CONFIG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = os.path.dirname(CONFIG_DIR)
# Absolute filesystem path to the apps folder:
APPS_DIR = os.path.join(SITE_ROOT, '{{project_name}}')
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
sys.path.append(SITE_ROOT)
########## END PATH CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = r"{{ secret_key }}"
########## END SECRET CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
########## END DEBUG CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
os.path.normpath(os.path.join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
# Third Party apps go here.
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'django_extensions', # shell_plus
)
# Apps specific for this project go here.
LOCAL_APPS = (
'{{project_name}}.users', # custom users app
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
# MIGRATIONS CONFIGURATION
MIGRATION_MODULES = {
'sites': '{{project_name}}.contrib.sites.migrations'
}
########## END MIGRATIONS MODULES
########## TEMPLATE CONFIGURATION
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(APPS_DIR, 'templates'),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END TEMPLATE CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
########## END WSGI CONFIGURATION
########## INTERNATIONAL CONFIGURATION
# https://docs.djangoproject.com/en/1.8/topics/i18n/
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'Europe/Madrid'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'es-ES'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END INTERNATIONAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = os.path.normpath(os.path.join(APPS_DIR, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = os.path.normpath(os.path.join(APPS_DIR, 'staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
os.path.normpath(os.path.join(APPS_DIR, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_
|
google/tmppy
|
_py2tmp/utils/__init__.py
|
Python
|
apache-2.0
| 784
| 0
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._ast_to_string import ast_to_string
from ._clang_format import clang_format
from ._graphs import compute_condensation_in_topological_order
from ._ir_to_string import ir_to_string
|
paulcon/active_subspaces
|
tutorials/test_functions/borehole/borehole_functions.py
|
Python
|
mit
| 2,595
| 0.035067
|
import numpy as np
import active_subspaces as ac
def borehole(xx):
#each row of xx should be [rw, r, Tu, Hu, Tl, Hl, L, Kw] in the normalized space
#returns column vector of borehole function at each row of inputs
x = xx.copy()
x = np.atleast_2d(x)
M = x.shape[0]
    #unnormalize inputs
xl = np.array([63070, 990, 63.1, 700, 1120, 9855])
xu = np.array([115600, 1110, 116, 820, 1680, 12045])
x[:,2:] = ac.utils.misc.BoundedNormalizer(xl, xu).unnormalize(x[:,2:])
x[:,0] = .0161812*x[:,0] + .1
x[:,1] = np.exp(1.0056*x[:,1] + 7.71)
    rw = x[:,0]; r = x[:,1]; Tu = x[:,2]; Hu = x[:,3]
Tl = x[:,4]; Hl = x[:,5]; L = x[:,6]; Kw = x[:,7]
pi = np.pi
    return (2*pi*Tu*(Hu - Hl)/(np.log(r/rw)*(1 + 2*L*Tu/(np.log(r/rw)*rw**2*Kw) + Tu/Tl))).reshape(M, 1)
def borehole_grad(xx):
#each row of xx should be [rw, r, Tu, Hu, Tl, Hl, L, Kw] in the normalized space
#returns matrix whose ith row is gradient of borehole function at ith row of inputs
x = xx.copy()
x = np.atleast_2d(x)
M = x.shape[0]
    #unnormalize inputs
xl = np.array([63070, 990, 63.1, 700, 1120, 9855])
xu = np.array([115600, 1110, 116, 820, 1680, 12045])
x[:,2:] = ac.utils.misc.BoundedNormalizer(xl, xu).unnormalize(x[:,2:])
x[:,0] = .0161812*x[:,0] + .1
x[:,1] = np.exp(1.0056*x[:,1] + 7.71)
rw = x[:,0]; r = x[:,1]; Tu = x[:,2]; Hu = x[:,3]
Tl = x[:,4]; Hl = x[:,5]; L = x[:,6]; Kw = x[:,7]
pi = np.pi
Q = 1 + 2*L*Tu/(np.log(r/rw)*rw**2*Kw) + Tu/Tl #Convenience variable
l = np.log(r/rw) #Convenience variable
dfdrw = (-2*pi*Tu*(Hu - Hl)*(Q*l)**-2*(-Q/rw - l*2*L*Tu/Kw*(l*rw**2)**-2*(-rw + 2*rw*l)))[:,None]
dfdr = (-2*pi*Tu*(Hu - Hl)*(l*Q)**-2*(Q/r - 2*L*Tu/(r*rw**2*Kw*l)))[:,None]
dfdTu = (2*pi*(Hu - Hl)/(l*Q) - 2*pi*Tu*(Hu - Hl)*(l*Q)**-2*(l*(2*L/(l*rw**2*Kw)+1./Tl)))[:,None]
dfdHu = (2*pi*Tu/(l*Q))[:,None]
dfdTl = (2*pi*Tu*(Hu - Hl)*(l*Q)**-2*l*Tu/Tl**2)[:,None]
dfdHl = (-2*pi*Tu/(l*Q))[:,None]
dfdL = (-2*pi*Tu*(Hu - Hl)*(l*Q)**-2*2*Tu/(rw**2*Kw))[:,None]
dfdKw = (2*pi*Tu*(Hu - Hl)*(l*Q)**-2*2*L*Tu/(rw**2*Kw**2))[:,None]
#The gradient components must be scaled in accordance with the chain rule: df/dx = df/dy*dy/dx
r = np.log(r); r = ((r - 7.71)/1.0056).reshape(M, 1)
return np.hstack((dfdrw*.0161812, dfdr*1.0056*np.exp(1.0056*r + 7.71), dfdTu*(115600 - 63070)/2., dfdHu*(1110 - 990)/2.,\
dfdTl*(116 - 63.1)/2., dfdHl*(820 - 700)/2., dfdL*(1680 - 1120)/2., dfdKw*(12045 - 9855)/2.))
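# Editor's note: a quick way to sanity-check borehole_grad against the chain
# rule described above is a central finite difference at a point in the
# normalized space (a sketch; the step size and comparison are illustrative):
if __name__ == "__main__":
    x0 = np.zeros((1, 8))
    eps = 1e-6
    analytic = borehole_grad(x0)
    numeric = np.hstack([(borehole(x0 + eps*np.eye(8)[i]) -
                          borehole(x0 - eps*np.eye(8)[i])) / (2*eps)
                         for i in range(8)])
    # max relative disagreement; should be tiny if the formulas are right
    print(np.max(np.abs(analytic - numeric) / np.abs(numeric)))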
|
jyi/ITSP
|
prophet-gpl/tools/return_counter.py
|
Python
|
mit
| 423
| 0.018913
|
#!/usr/bin/env python
f = open("repair.log", "r");
lines = f.readlines();
cnt = 0;
for line in lines:
    tokens = line.strip().split();
if (len(tokens) > 3):
if (tokens[0] == "Total") and (tokens[1] == "return"):
cnt += int(tokens[3]);
if (tokens[0] == "Total") and (tokens[2] == "different") and (tokens[3] == "repair"):
cnt += int(tokens[1]);
print "Total size: " + str(cnt);
|
ploggingdev/djangochat
|
djangochat/urls.py
|
Python
|
gpl-3.0
| 817
| 0
|
"""djangochat URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^', include('chatdemo.urls')),
    url(r'^admin/', admin.site.urls),
]
|
natetrue/ReplicatorG
|
skein_engines/skeinforge-40/fabmetheus_utilities/geometry/manipulation_paths/_outset.py
|
Python
|
gpl-2.0
| 1,041
| 0.009606
|
"""
Create outset.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities import intercircle
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalExecutionOrder = 80
def getManipulatedPaths(close, loop, prefix, sideLength, xmlElement):
"Get outset path."
radius = lineation.getStrokeRadiusByPrefix(prefix, xmlElement )
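	# a negative radius below turns the inset helper into an outset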
return intercircle.getInsetLoopsFromVector3Loop(loop, -radius)
def processXMLElement(xmlElement):
"Process the xml element."
lineation.processXMLElementByFunction(getManipulatedPaths, xmlElement)
|
GabMus/simpelblog
|
blog/models.py
|
Python
|
gpl-3.0
| 969
| 0.034056
|
from django.db import models
class Article(models.Model):
posttitle = models.TextField(default="Post")
post = models.TextField()
piclink = models.TextField(blank=True)
pub_date = models.DateTimeField(auto_now_add=True)
class BlogPost(Article):
def __str__(self):
return self.posttitle
class PagePost(Article):
    parentpage = models.ForeignKey('Page', null=True)
def __str__(self):
        return self.posttitle  # Article defines no 'tag' field to prepend
class Page(models.Model):
page_index = models.IntegerField(default=0)
name = models.CharField(max_length=200, unique=True)
def __str__(self):
return self.name
class Comment(models.Model):
name=models.CharField(max_length=20, blank=False)
email=models.CharField(max_length=120, blank=False)
    text=models.CharField(max_length=512, blank=False)
parent_article=models.ForeignKey('BlogPost', null=False)
pub_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.email+" "+self.parent_article.__str__()
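# Editor's note: BlogPost and PagePost inherit from Article via Django
# multi-table inheritance, so each row is an Article plus a child table.
# A usage sketch (values are illustrative):
#
# post = BlogPost.objects.create(posttitle="Hello", post="First entry")
# Comment.objects.create(name="Ana", email="ana@example.com",
#                        text="Nice post!", parent_article=post)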
|
duyuan11/glumpy
|
examples/interpolations.py
|
Python
|
bsd-3-clause
| 4,303
| 0.008599
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier. All Rights Reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
""" This example shows spatial interpolation of images. """
import numpy as np
from glumpy import app, gl, gloo, data, library
vertex = """
attribute vec2 position;
attribute vec2 texcoord;
attribute float interpol;
varying vec2 v_texcoord;
varying float v_interpol;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
v_texcoord = texcoord;
v_interpol = interpol;
} """
fragment = """
#include "misc/spatial-filters.frag"
uniform sampler2D u_data;
uniform vec2 u_shape;
varying vec2 v_texcoord;
varying float v_interpol;
void main()
{
if (v_interpol < 0.5)
// gl_FragColor = Nearest(u_data, u_shape, v_texcoord);
gl_FragColor = texture2D(u_data, v_texcoord);
else if (v_interpol < 1.5)
gl_FragColor = Bilinear(u_data, u_shape, v_texcoord);
else if (v_interpol < 2.5)
gl_FragColor = Hanning(u_data, u_shape, v_texcoord);
else if (v_interpol < 3.5)
gl_FragColor = Hamming(u_data, u_shape, v_texcoord);
else if (v_interpol < 4.5)
gl_FragColor = Hermite(u_data, u_shape, v_texcoord);
else if (v_interpol < 5.5)
gl_FragColor = Kaiser(u_data, u_shape, v_texcoord);
else if (v_interpol < 6.5)
gl_FragColor = Quadric(u_data, u_shape, v_texcoord);
else if (v_interpol < 7.5)
gl_FragColor = Bicubic(u_data, u_shape, v_texcoord);
else if (v_interpol < 8.5)
gl_FragColor = CatRom(u_data, u_shape, v_texcoord);
else if (v_interpol < 9.5)
gl_FragColor = Mitchell(u_data, u_shape, v_texcoord);
else if (v_interpol < 10.5)
gl_FragColor = Spline16(u_data, u_shape, v_texcoord);
else if (v_interpol < 11.5)
gl_FragColor = Spline36(u_data, u_shape, v_texcoord);
else if (v_interpol < 12.5)
gl_FragColor = Gaussian(u_data, u_shape, v_texcoord);
else if (v_interpol < 13.5)
gl_FragColor = Bessel(u_data, u_shape, v_texcoord);
else if (v_interpol < 14.5)
gl_FragColor = Sinc(u_data, u_shape, v_texcoord);
else if (v_interpol < 15.5)
gl_FragColor = Lanczos(u_data, u_shape, v_texcoord);
else
gl_FragColor = Blackman(u_data, u_shape, v_texcoord);
} """
window = app.Window(width=4*512, height=2*512)
@window.event
def on_draw(dt):
window.clear()
program.draw(gl.GL_TRIANGLES, indices)
@window.event
def on_mouse_motion(x, y, dx, dy):
global zoom
dx, dy = 0.05*zoom, 0.05*zoom
x = min(max(x/1024.0, dx), 1.0-dx)
y = min(max(y/1024.0, dy), 1.0-dy)
vertices[1:]['texcoord'] = (x-dx,y-dy), (x-dy,y+dy), (x+dx, y-dy), (x+dx,y+dy)
@window.event
def on_mouse_scroll(x, y, dx, dy):
global zoom
zoom = np.minimum(np.maximum(zoom*(1+dy/100.0), 0.001), 10.00)
    on_mouse_motion(x,y,0,0)
zoom = 0.25
program = gloo.Program(vertex, fragment)
vertices = np.zeros((16+1,4),
[("position", np.float32, 2),
("texcoord", np.float32, 2),
("interpol", np.float32, 1)]).view(gloo.VertexBuffer)
vertices["position"][0] = (-1,+1), (-1,-1), (0,+1), (0,-1)
dx, dy = 1/4.0, 1/2.0
for j in range(4):
for i in range(4):
index = 1+j*4+i
x, y = i/4.0, -1 + j/2.0
vertices["position"][index] = (x,y+dy), (x,y), (x+dx,y+dy), (x+dx,y)
vertices['texcoord'] = ( 0, 0), ( 0,+1), (+1, 0), (+1,+1)
vertices['interpol'] = np.arange(17).reshape(17,1)
program.bind(vertices)
indices = np.zeros((17,6),np.uint32).view(gloo.IndexBuffer)
indices[:] = [0,1,2,1,2,3]
indices += 4*np.arange(17).reshape(17,1)
lena = data.get("lena.png")
program['u_data'] = lena
program['u_shape'] = lena.shape[1], lena.shape[0]
program['u_kernel'] = data.get("spatial-filters.npy")
program['u_data'].interpolation = gl.GL_NEAREST
program['u_data'].wrapping = gl.GL_CLAMP
x,y = 512,512
dx, dy = 0.05, 0.05
x = min(max(x/1024.0, dx), 1.0-dx)
y = min(max(y/1024.0, dy), 1.0-dy)
vertices['texcoord'][1:] = (x-dx,y-dy), (x-dy,y+dy), (x+dx, y-dy), (x+dx,y+dy)
app.run()
|
wrenchzc/photomanager
|
photomanager/db/imagehandler.py
|
Python
|
mit
| 2,547
| 0.000785
|
import os
from sqlalchemy import and_
from photomanager.lib.pmconst import TODO_INX_NAME
from photomanager.utils.imageutils import ImageInfo
from photomanager.db.helper import exif_to_model
from photomanager.db.models import ImageMeta
from photomanager.lib.helper import get_file_md5
from photomanager.db.config import Config
class ImageDBHandler:
def __init__(self, folder, session, skip_existed):
"""
        :param folder: root folder containing the images
        :param session: db session
        :param skip_existed: skip files whose metadata is already indexed
        """
self.session = session
self.config = Config(self.session)
self.folder = folder
        self.skip_existed = skip_existed
self._on_index_image = None
@property
def on_index_image(self):
return self._on_index_image
@on_index_image.setter
def on_index_image(self, func_on_index_image):
assert callable(func_on_index_image)
self._on_index_image = func_on_index_image
def do_index(self, filenames):
cnt = 0
for inx, filename in enumerate(filenames):
filename = filename.strip()
self.index_image(filename)
cnt += 1
if self.on_index_image:
self.on_index_image(inx)
if inx % 100 == 0:
self.session.commit()
self.session.commit()
return cnt
def index_image(self, filename):
folder = os.path.dirname(filename)
basename = os.path.basename(filename)
image_meta_existed = self.session.query(ImageMeta).filter(
and_(ImageMeta.filename == basename, ImageMeta.folder == folder)).first()
full_file_name = self.folder + '/' + filename
if image_meta_existed and (self.skip_existed or image_meta_existed.md5 == get_file_md5(full_file_name)):
return None
image_info = ImageInfo(full_file_name)
image_meta_new = exif_to_model(image_info)
image_meta_new.filename = basename
image_meta_new.folder = folder
if image_meta_existed:
image_meta_new.id = image_meta_existed.id
image_meta_new.uuid = image_meta_existed.uuid
self.session.merge(image_meta_new)
return image_meta_new
@property
def todo_index(self):
value = self.config.get_value(TODO_INX_NAME)
if value:
return int(value)
else:
return -1
@todo_index.setter
def todo_index(self, value):
assert isinstance(value, int)
self.config.set_value(TODO_INX_NAME, value)
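# Editor's note: a sketch of driving the handler end to end (the file list
# and progress callback are illustrative):
#
# handler = ImageDBHandler('/photos', session, skip_existed=True)
# handler.on_index_image = lambda inx: print(inx)
# with open('filelist.txt') as fh:
#     handler.do_index(fh.readlines())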
|
denverfoundation/storybase
|
apps/storybase_story/feeds.py
|
Python
|
mit
| 4,815
| 0.003323
|
from mimetypes import guess_type
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
from storybase_asset.models import FEATURED_ASSET_THUMBNAIL_WIDTH, FEATURED_ASSET_THUMBNAIL_HEIGHT
from storybase_story.models import Story
from storybase_taxonomy.models import Category
class StoriesFeed(Feed):
"""
Generates a feed of the 25 most recently published stories
Allows basic filtering by topic slug by providing either a
``topics=SLUG`` or ``topics-exclude=SLUG`` querystring parameter to
the GET request.
"""
title = "%s %s" % (settings.STORYBASE_SITE_NAME, _("Stories"))
description = _("Recent stories from ") + settings.STORYBASE_SITE_NAME
# Map of query string parameters to Queryset filters
QUERY_MAP = {
'topics': 'topics__categorytranslation__slug',
}
def link(self):
return reverse('explore_stories')
def get_object(self, request, *args, **kwargs):
# HACK: Dummy get_object implementation that doesn't actually get an
        # object, but has the side effect of storing the request object as
# an attribute of the Feed object
self.request = request
return super(StoriesFeed, self).get_object(request, *args, **kwargs)
def get_filter_kwargs(self):
"""
Get queryset filter/exclude arguments from the request's GET parameters
Returns a tuple of dictionaries, the first providing arguments suitable
for a call to Queryset.filter() and the second providing arguments
        for a call to Queryset.exclude()
"""
filter_kwargs = {}
exclude_kwargs = {}
for param, lookup in self.QUERY_MAP.items():
exclude_param = '%s-exclude' % param
if param in self.request.GET:
filter_kwargs[lookup] = self.request.GET[param]
if exclude_param in self.request.GET:
exclude_kwargs[lookup] = self.request.GET[exclude_param]
return filter_kwargs, exclude_kwargs
def items(self):
# Only show non-connected, published stories in the feed
queryset = Story.objects.exclude(source__relation_type='connected').published()
filter_kwargs, exclude_kwargs = self.get_filter_kwargs()
if filter_kwargs:
queryset = queryset.filter(**filter_kwargs)
if exclude_kwargs:
queryset = queryset.exclude(**exclude_kwargs)
return queryset.order_by('-published')[:25]
def item_title(self, item):
return item.title
def item_description(self, item):
truncator = Truncator(item.summary)
return truncator.words(75, html=True)
def item_author_name(self, item):
return item.contributor_name
def item_pubdate(self, item):
return item.published
def item_updateddate(self, item):
return item.last_edited
    def item_categories(self, item):
category_objs = list(item.projects.all()) + list(item.organizations.all()) + list(item.tags.all()) + list(item.topics.all())
        return [obj.name for obj in category_objs]
def item_copyright(self, item):
return item.license_name()
def item_enclosure_url(self, item):
return item.featured_asset_thumbnail_url()
def item_enclosure_length(self, item):
asset = item.get_featured_asset()
thumbnail_options = {
'size': (FEATURED_ASSET_THUMBNAIL_WIDTH,FEATURED_ASSET_THUMBNAIL_HEIGHT),
}
try:
return asset.get_thumbnail(thumbnail_options).size
except AttributeError:
return 0
def item_enclosure_mime_type(self, item):
url = item.featured_asset_thumbnail_url()
(mtype, encoding) = guess_type(url)
return mtype
class TopicStoriesFeed(StoriesFeed):
"""
Generates a feed of the 25 most recently published stories in a particular
topic
The topic is passed to the feed via a ``slug`` keyword argument in the URL
configuration for the feed.
"""
def get_object(self, request, slug):
return get_object_or_404(Category, categorytranslation__slug=slug)
def title(self, obj):
return "%s %s %s" % (settings.STORYBASE_SITE_NAME, obj.name, _("Stories"))
def description(self, obj):
return _("Recent ") + obj.name + _(" stories from ") + settings.STORYBASE_SITE_NAME
def link(self, obj):
return "%s?topics=%s" % (reverse('explore_stories'), obj.pk)
def items(self, obj):
return Story.objects.exclude(source__relation_type='connected').published().filter(topics=obj).order_by('-published')[:25]
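# Editor's note: with QUERY_MAP above, the base feed is filterable straight
# from the querystring (the mount point depends on the project's URLconf;
# the slugs here are illustrative):
#
#   .../feed/?topics=education          -> only stories under 'education'
#   .../feed/?topics-exclude=education  -> everything except 'education'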
|
brutasse/graphite-api
|
graphite_api/render/datalib.py
|
Python
|
apache-2.0
| 7,097
| 0
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from collections import defaultdict
from structlog import get_logger
from ..utils import epoch
logger = get_logger()
class TimeSeries(list):
def __init__(self, name, start, end, step, values, consolidate='average'):
list.__init__(self, values)
self.name = name
self.start = start
self.end = end
self.step = step
self.consolidationFunc = consolidate
self.valuesPerPoint = 1
self.options = {}
self.pathExpression = name
def __eq__(self, other):
if isinstance(other, TimeSeries):
color_eq = True
if hasattr(self, 'color'):
if hasattr(other, 'color'):
color_eq = (self.color == other.color)
else:
color_eq = False
elif hasattr(other, 'color'):
color_eq = False
return ((self.name, self.start, self.step, self.consolidationFunc,
self.valuesPerPoint, self.options) ==
(other.name, other.start, other.step,
other.consolidationFunc, other.valuesPerPoint,
other.options)) and list.__eq__(self, other) and color_eq
return False
def __iter__(self):
if self.valuesPerPoint > 1:
return self.__consolidatingGenerator(list.__iter__(self))
else:
return list.__iter__(self)
def consolidate(self, valuesPerPoint):
self.valuesPerPoint = int(valuesPerPoint)
def __consolidatingGenerator(self, gen):
buf = []
for x in gen:
buf.append(x)
if len(buf) == self.valuesPerPoint:
while None in buf:
buf.remove(None)
if buf:
yield self.__consolidate(buf)
buf = []
else:
yield None
while None in buf:
buf.remove(None)
if buf:
yield self.__consolidate(buf)
else:
yield None
return
def __consolidate(self, values):
usable = [v for v in values if v is not None]
if not usable:
return None
if self.consolidationFunc == 'sum':
return sum(usable)
if self.consolidationFunc == 'average':
return float(sum(usable)) / len(usable)
if self.consolidationFunc == 'max':
return max(usable)
if self.consolidationFunc == 'min':
return min(usable)
raise Exception(
"Invalid consolidation function: '%s'" % self.consolidationFunc)
def __repr__(self):
return 'TimeSeries(name=%s, start=%s, end=%s, step=%s)' % (
self.name, self.start, self.end, self.step)
class DataStore(object):
"""
Simple object to store results of multi fetches.
Also aids in looking up data by pathExpressions.
"""
def __init__(self):
self.paths = defaultdict(set)
self.data = defaultdict(list)
def get_paths(self, path_expr):
"""
Returns all paths found for path_expr
"""
return sorted(self.paths[path_expr])
def add_data(self, path, time_info, data, exprs):
"""
Stores data before it can be put into a time series
"""
# Dont add if empty
if not nonempty(data):
for d in self.data[path]:
if nonempty(d['values']):
return
# Add data to path
for expr in exprs:
self.paths[expr].add(path)
self.data[path].append({
'time_info': time_info,
'values': data
})
def get_series_list(self, path_expr):
series_list = []
for path in self.get_paths(path_expr):
for data in self.data.get(path):
start, end, step = data['time_info']
series = TimeSeries(path, start, end, step, data['values'])
                series.pathExpression = path_expr
series_list.append(series)
return series_list
def fetchData(requestContext, pathExprs):
from ..app import app
startTime = int(epoch(requestContext['startTime']))
endTime = int(epoch(requestContext['endTime']))
    if 'now' in requestContext:
now = int(epoch(requestContext['now']))
else:
now = None
# Convert to list if given single path
if not isinstance(pathExprs, list):
pathExprs = [pathExprs]
data_store = DataStore()
multi_nodes = defaultdict(list)
single_nodes = []
path_to_exprs = defaultdict(list)
# Group nodes that support multiple fetches
for pathExpr in pathExprs:
for node in app.store.find(pathExpr, startTime, endTime):
if not node.is_leaf:
continue
if node.path not in path_to_exprs:
if hasattr(node, '__fetch_multi__'):
multi_nodes[node.__fetch_multi__].append(node)
else:
single_nodes.append(node)
path_to_exprs[node.path].append(pathExpr)
# Multi fetches
for finder in app.store.finders:
if not hasattr(finder, '__fetch_multi__'):
continue
nodes = multi_nodes[finder.__fetch_multi__]
if not nodes:
continue
try:
time_info, series = finder.fetch_multi(nodes, startTime, endTime,
now, requestContext)
except TypeError:
time_info, series = finder.fetch_multi(nodes, startTime, endTime)
for path, values in series.items():
data_store.add_data(path, time_info, values,
path_to_exprs[path])
# Single fetches
fetches = [
(node.path, node.fetch(startTime, endTime, now, requestContext))
for node in single_nodes
]
for path, results in fetches:
if not results:
logger.info("no results", path=path, start=startTime,
end=endTime)
continue
try:
time_info, values = results
except ValueError as e:
raise Exception("could not parse timeInfo/values from metric "
"'%s': %s" % (path, e))
data_store.add_data(path, time_info, values, path_to_exprs[path])
return data_store
def nonempty(series):
for value in series:
if value is not None:
return True
return False
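# Editor's note: the consolidation generator above collapses fixed-size
# windows (Nones are dropped inside a window, and a partial tail window is
# still consolidated). A standalone sketch of the behavior:
if __name__ == "__main__":
    ts = TimeSeries('metric', 0, 50, 10, [1, 2, None, 4, 5])
    ts.consolidate(3)  # 3 raw values per rendered point
    assert list(ts) == [1.5, 4.5]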
|
rewiko/chat_django
|
chat/urls.py
|
Python
|
gpl-2.0
| 881
| 0.013652
|
# -*- coding: utf-8 -*-
from chat.models import Message
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.auth.models import User
from django.views.generic.list import ListView
# Uncomment the next two lines to enable the admin:
admin.autodiscover()
urlpatterns = patterns('chat.views',
    # We are going to rewrite the home page URL
url(r'^check_messages_ajax', 'check_messages_ajax', name='check_messages_ajax'),
url(r'^connexion/$', 'connexion', name='connexion'),
    url(r'^deconnexion/$', 'deconnexion', name='deconnexion'),
url(r'^$', 'connexion', name='connexion'),
url(r'^add_message', 'add_message', name='add_message'),
url(r'^$', ListView.as_view(model=Message, context_object_name="derniers_messages",
template_name="message_list.html")),
)
|
alexandr-fonari/raman-sc
|
VASP/vasp_raman.py
|
Python
|
mit
| 11,443
| 0.008914
|
#!/usr/bin/env python
#
# Raman off-resonant activity calculator
# using VASP as a back-end.
#
# Contributors: Alexandr Fonari (Georgia Tech)
# Shannon Stauffer (UT Austin)
#
# MIT license, 2013
#
def parse_poscar_header(inp_fh):
import sys
from math import sqrt
#
inp_fh.seek(0) # just in case
poscar_header = ""
vol = 0.0
b = []
atom_numbers = []
#
inp_fh.readline() # skip title
scale = float(inp_fh.readline())
for i in range(3): b.append( [float(s) for s in inp_fh.readline().split()] )
#
if scale > 0.0:
b = [[ b[i][j]*scale for i in range(3)] for j in range(3) ]
scale = 1.0
#
vol = b[0][0]*b[1][1]*b[2][2] + b[1][0]*b[2][1]*b[0][2] + b[2][0]*b[0][1]*b[1][2] - \
b[0][2]*b[1][1]*b[2][0] - b[2][1]*b[1][2]*b[0][0] - b[2][2]*b[0][1]*b[1][0]
else:
print "[parse_poscar]: ERROR negative scale not implemented."
vol = scale
sys.exit(1)
#
atom_labels = inp_fh.readline() # yes, it is hardcoded for VASP5
atom_numbers = [int(s) for s in inp_fh.readline().split()]
nat = sum(atom_numbers)
#
poscar_header += "%15.12f\n" % scale
poscar_header += "%15.12f %15.12f %15.12f\n" % (b[0][0], b[0][1], b[0][2])
poscar_header += "%15.12f %15.12f %15.12f\n" % (b[1][0], b[1][1], b[1][2])
poscar_header += "%15.12f %15.12f %15.12f\n" % (b[2][0], b[2][1], b[2][2])
poscar_header += atom_labels
poscar_header += " ".join(str(x) for x in atom_numbers)+"\n"
#
return nat, vol, poscar_header
#
def parse_env_params(params):
import sys
#
tmp = params.strip().split('_')
if len(tmp) != 4:
print "[parse_env_params]: ERROR there should be exactly four parameters"
sys.exit(1)
#
[first, last, nderiv, step_size] = [int(tmp[0]), int(tmp[1]), int(tmp[2]), float(tmp[3])]
#
return first, last, nderiv, step_size
#
def get_modes_from_OUTCAR(outcar_fh, nat):
import sys
import re
from math import sqrt
eigvals = [ 0.0 for i in range(nat*3) ]
eigvecs = [ 0.0 for i in range(nat*3) ]
norms = [ 0.0 for i in range(nat*3) ]
pos = [ 0.0 for i in range(nat) ]
#
outcar_fh.seek(0) # just in case
while True:
line = outcar_fh.readline()
if not line:
break
#
if "Eigenvectors after division by SQRT(mass)" in line:
outcar_fh.readline() # empty line
outcar_fh.readline() # Eigenvectors and eigenvalues of the dynamical matrix
outcar_fh.readline() # ----------------------------------------------------
outcar_fh.readline() # empty line
#
for i in range(nat*3): # all frequencies should be supplied, regardless of those requested to calculate
outcar_fh.readline() # empty line
p = re.search(r'^\s*(\d+).+?([\.\d]+) cm-1', outcar_fh.readline())
eigvals[i] = float(p.group(2))
#
outcar_fh.readline() # X Y Z dx dy dz
eigvec = []
#
for j in range(nat):
tmp = outcar_fh.readline().split()
if i == 0: pos[j] = [ float(tmp[x]) for x in range(3) ] # get atomic positions only once
#
eigvec.append([ float(tmp[x]) for x in range(3,6) ])
#
eigvecs[i] = eigvec
norms[i] = sqrt( sum( [abs(x)**2 for sublist in eigvec for x in sublist] ) )
#
return pos, eigvals, eigvecs, norms
#
print "[get_modes_from_OUTCAR]: ERROR Couldn't find 'Eigenvectors after division by SQRT(mass)' in OUTCAR. Use 'NWRITE=3' in INCAR. Exiting..."
sys.exit(1)
#
def get_epsilon_from_OUTCAR(outcar_fh):
import re
import sys
epsilon = []
#
outcar_fh.seek(0) # just in case
while True:
line = outcar_fh.readline()
if not line:
break
#
if "MACROSCOPIC STATIC DIELECTRIC TENSOR" in line:
outcar_fh.readline()
epsilon.append([float(x) for x in outcar_fh.readline().split()])
epsilon.append([float(x) for x in outcar_fh.readline().split()])
epsilon.append([float(x) for x in outcar_fh.readline().split()])
return epsilon
#
raise RuntimeError("[get_epsilon_from_OUTCAR]: ERROR Couldn't find dielectric tensor in OUTCAR")
#
if __name__ == '__main__':
import sys
from math import pi
from shutil import move
import os
import datetime
import time
#import argparse
import optparse
#
print ""
print " Raman off-resonant activity calculator,"
print " using VASP as a back-end."
print ""
print " Contributors: Alexandr Fonari (Georgia Tech)"
print " Shannon Stauffer (UT Austin)"
print " MIT License, 2013"
print " URL: http://raman-sc.github.io"
print " Started at: "+datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
print ""
#
description = "Before run, set environment variables:\n"
description += " VASP_RAMAN_RUN='mpirun vasp'\n"
description += " VASP_RAMAN_PARAMS='[first-mode]_[last-mode]_[nderiv]_[step-size]'\n\n"
description += "bash one-liner is:\n"
description += "VASP_RAMAN_RUN='mpirun vasp' VASP_RAMAN_PARAMS='1_2_2_0.01' python vasp_raman.py"
#
parser = optparse.OptionParser(description=description)
parser.add_option('-g', '--gen', help='Generate POSCAR only', action='store_true')
parser.add_option('-u', '--use_poscar', help='Use provided POSCAR in the folder, USE WITH CAUTION!!', action='store_true')
(options, args) = parser.parse_args()
#args = vars(parser.parse_args())
args = vars(options)
#
VASP_RAMAN_RUN = os.environ.get('VASP_RAMAN_RUN')
if VASP_RAMAN_RUN == None:
print "[__main__]: ERROR Set environment variable 'VASP_RAMAN_RUN'"
print ""
parser.print_help()
sys.exit(1)
print "[__main__]: VASP_RAMAN_RUN='"+VASP_RAMAN_RUN+"'"
#
VASP_RAMAN_PARAMS = os.environ.get('VASP_RAMAN_PARAMS')
if VASP_RAMAN_PARAMS == None:
print "[__main__]: ERROR Set environment variable 'VASP_RAMAN_PARAMS'"
print ""
parser.print_help()
sys.exit(1)
print "[__main__]: VASP_RAMAN_PARAMS='"+VASP_RAMAN_PARAMS+"'"
#
first, last, nderiv, step_size = parse_env_params(VASP_RAMAN_PARAMS)
assert first >= 1, '[__main__]: First mode should be equal to or larger than 1'
assert last >= first, '[__main__]: Last mode should be equal to or larger than the first mode'
if args['gen']: assert last == first, "[__main__]: '-gen' mode -> only generation for one mode makes sense"
assert nderiv == 2, '[__main__]: At this time, nderiv = 2 is the only supported value'
disps = [-1, 1] # hardcoded for
coeffs = [-0.5, 0.5] # three point stencil (nderiv=2)
#
try:
poscar_fh = open('POSCAR.phon', 'r')
except IOError:
print "[__main__]: ERROR Couldn't open input file POSCAR.phon, exiting...\n"
sys.exit(1)
#
nat, vol, poscar_header = parse_poscar_header(poscar_fh)
poscar_fh.close()
#
try:
outcar_fh = open('OUTCAR.phon', 'r')
except IOError:
print "[__main__]: ERROR Couldn't open OUTCAR.phon, exiting...\n"
sys.exit(1)
#
pos, eigvals, eigvecs, norms = get_modes_from_OUTCAR(outcar_fh, nat)
outcar_fh.close()
#
output_fh = open('vasp_raman.dat', 'w')
output_fh.write("# mode freq(cm-1) alpha beta2 activity\n")
for i in range(first-1, last):
eigval = eigvals[i]
eigvec = eigvecs[i]
norm = norms[i]
#
print ""
print "[__main__]: Mode #%i: frequency %10.7f cm-1; norm: %10.7f" % ( i+1, eigval, norm )
#
ra = [[0.0 for x in range(3)] for y in range(3)]
for j in range(len(disps)):
disp_filename = 'OUTCAR.%04d.%+d.out' % (i+1, disps[j])
#
try:
|
mikehulluk/morphforge
|
doc/srcs_generated_examples/python_srcs/poster1.py
|
Python
|
bsd-2-clause
| 4,063
| 0.007138
|
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
from morphforge.stdimports import *
from morphforgecontrib.stdimports import *
eqnset_txt_na = """
define_component hh_na {
i = g * (v-erev) * m**3*h
m_inf = m_alpha_rate / (m_alpha_rate + m_beta_rate)
m_tau = 1.0 / (m_alpha_rate + m_beta_rate)
m' = (m_inf-m) / m_tau
h_inf = h_alpha_rate / (h_alpha_rate + h_beta_rate)
h_tau = 1.0 / (h_alpha_rate + h_beta_rate)
h' = (h_inf-h) / h_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1+a2*V)/(a3+std.math.exp((V+a4)/a5))
m_alpha_rate = StdFormAB(V=v, a1=m_a1, a2=m_a2, a3=m_a3, a4=m_a4, a5=m_a5)
m_beta_rate = StdFormAB(V=v, a1=m_b1, a2=m_b2, a3=m_b3, a4=m_b4, a5=m_b5)
h_alpha_rate = StdFormAB(V=v, a1=h_a1, a2=h_a2, a3=h_a3, a4=h_a4, a5=h_a5)
h_beta_rate = StdFormAB(V=v, a1=h_b1, a2=h_b2, a3=h_b3, a4=h_b4, a5=h_b5)
m_a1={-4.00 ms-1}; m_a2={-0.10 mV-1 ms-1}; m_a3={-1.00}; m_a4={40.00 mV}; m_a5={-10.00 mV};
m_b1={ 4.00 ms-1}; m_b2={ 0.00 mV-1 ms-1}; m_b3={ 0.00}; m_b4={65.00 mV}; m_b5={ 18.00 mV};
h_a1={ 0.07 ms-1}; h_a2={ 0.00 mV-1 ms-1}; h_a3={ 0.00}; h_a4={65.00 mV}; h_a5={ 20.00 mV};
h_b1={ 1.00 ms-1}; h_b2={ 0.00 mV-1 ms-1}; h_b3={ 1.00}; h_b4={35.00 mV}; h_b5={-10.00 mV};
erev = 50.0mV;
<=> PARAMETER g:(S/m2)
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_k = """
define_component hh_k {
i = g * (v-erev) * n*n*n*n
n_inf = n_alpha_rate / (n_alpha_rate + n_beta_rate)
n_tau = 1.0 / (n_alpha_rate + n_beta_rate)
n' = (n_inf-n) / n_tau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1 + a2*V)/(a3+std.math.exp((V+a4)/a5))
n_alpha_rate = StdFormAB(V=v, a1=n_a1, a2=n_a2, a3=n_a3, a4=n_a4, a5=n_a5)
n_beta_rate = StdFormAB(V=v, a1=n_b1, a2=n_b2, a3=n_b3, a4=n_b4, a5=n_b5)
n_a1={-0.55 ms-1}; n_a2={-0.01 mV-1 ms-1}; n_a3={-1.00}; n_a4={55.00 mV}; n_a5={-10.00 mV}
n_b1={0.125 ms-1}; n_b2={ 0.00 mV-1 ms-1}; n_b3={ 0.00}; n_b4={65.00 mV}; n_b5={ 80.00 mV}
g = {36.0mS/cm2}
erev = {-77.0mV}
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
eqnset_txt_lk = """
define_component hh_lk {
i = {0.3mS/cm2} * (v- {-54.3mV})
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
} """
env = NEURONEnvironment()
sim = env.Simulation()
# Create a cell:
morph_dict = {'root': {'length': 18.8, 'diam': 18.8, 'id':'soma'} }
my_morph = MorphologyTree.fromDictionary(morph_dict)
cell = sim.create_cell(name="Cell1", morphology=my_morph)
#soma = cell.get_location("soma")
# Setup passive channels:
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))
# Setup active channels:
na_chl = env.Channel(NeuroUnitEqnsetMechanism, name="NaChl", eqnset=eqnset_txt_na,
default_parameters={"g":qty("120:mS/cm2")}, )
k_chl = env.Channel(NeuroUnitEqnsetMechanism, name="KChl", eqnset=eqnset_txt_k, )
lk_chl = env.Channel(NeuroUnitEqnsetMechanism, name="LKChl", eqnset=eqnset_txt_lk, )
cell.apply_channel( na_chl)
cell.apply_channel( lk_chl)
cell.apply_channel( k_chl)
# Define what to record:
sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma)
sim.record(na_chl, what='m', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(na_chl, what='h', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
sim.record(k_chl, what='n', cell_location=cell.soma, user_tags=[StandardTags.StateVariable])
# Create the stimulus and record the injected current:
cc = sim.create_currentclamp(name="CC1", amp=qty("100:pA"), dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)
sim.record(cc, what=StandardTags.Current)
# run the simulation
results = sim.run()
TagViewer(results, timerange=(50, 250)*units.ms, show=True)
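# Illustrative sketch (plain Python, not part of morphforge): evaluate the
# steady-state Na activation m_inf(v) from the StdFormAB rate law in the
# eqnset above, with the m-gate parameters copied verbatim. Units (mV, ms)
# are implied rather than tracked the way morphforge does.
from math import exp
def std_form_ab(v, a1, a2, a3, a4, a5):
    return (a1 + a2 * v) / (a3 + exp((v + a4) / a5))
def m_inf(v):
    alpha = std_form_ab(v, -4.00, -0.10, -1.00, 40.00, -10.00)
    beta = std_form_ab(v, 4.00, 0.00, 0.00, 65.00, 18.00)
    return alpha / (alpha + beta)
print(m_inf(-65.0))  # ~0.05 near rest: the channel is mostly closed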
|
google/wasserstein-dist
|
compute_all.py
|
Python
|
apache-2.0
| 2,165
| 0.006467
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute Wasserstein distances between different subsets of CIFAR.
Note: comparing two fixed sets is a sanity check, not the target use case.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import tensorflow as tf
from dataset import Dataset
from wasserstein import Wasserstein
tf.flags.DEFINE_string('filepattern', '/tmp/cifar10/cifar_train_class_%d.pic',
'Filepattern from which to read the dataset.')
tf.flags.DEFINE_integer('batch_size', 1000, 'Batch size of generator.')
tf.flags.DEFINE_integer('loss_steps', 50, 'Number of optimization steps.')
FLAGS = tf.flags.FLAGS
def print_flush(string):
sys.stdout.write(string)
sys.stdout.flush()
def main(unused_argv):
# tf.logging.set_verbosity(tf.logging.INFO)
# load two copies of the dataset
print('Loading datasets...')
dataset = [Dataset(bs=FLAGS.batch_size, filepattern=FLAGS.filepattern,
label=i) for i in range(10)]
print('Computing Wasserstein distance(s)...')
for i in range(10):
for j in range(10):
with tf.Graph().as_default():
# compute Wasserstein distance between sets of labels i and j
wasserstein = Wasserstein(dataset[i], dataset[j])
loss = wasserstein.dist(C=.1, nsteps=FLAGS.loss_steps)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
res = sess.run(loss)
print_flush('%f ' % res)
print_flush('\n')
if __name__ == '__main__':
tf.app.run(main)
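# Sketch of a variant that collects the pairwise distances into a matrix
# instead of streaming them to stdout; it reuses the Dataset/Wasserstein API
# exactly as invoked above, so the same assumptions apply.
def distance_matrix(datasets, nsteps=50):
    n = len(datasets)
    dist = [[0.0] * n for _ in range(n)]
    for i in range(n):
        for j in range(n):
            with tf.Graph().as_default():
                w = Wasserstein(datasets[i], datasets[j])
                loss = w.dist(C=.1, nsteps=nsteps)
                with tf.Session() as sess:
                    sess.run(tf.global_variables_initializer())
                    dist[i][j] = sess.run(loss)
    return dist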
|
tarzenda/gasp
|
tests/mockbackends.py
|
Python
|
gpl-3.0
| 772
| 0
|
# -*- coding: utf-8 -*-
#
# This program is part of GASP, a toolkit for newbie Python Programmers.
# Copyright (C) 2009, the GASP Development Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class MockBackEnd(object):
def __init__(self):
self.screen = None
self.rate = None
def create_screen(self, screen):
self.screen = screen
def set_frame_rate(self, rate):
self.rate = rate
|
skolsuper/imagedicer
|
imagedicer.py
|
Python
|
mit
| 3,913
| 0.004856
|
import Image
import os
import math
import argparse
# Set the root directory
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
HTML_DIR = os.path.join(BASE_DIR, 'diced_images')
def dice(image_path, out_name, out_ext, outdir, slices):
img = Image.open(image_path) # Load image
imgdir = os.path.join(outdir, out_name)
if not os.path.exists(imgdir):
os.makedirs(imgdir)
imageWidth, imageHeight = img.size # Get image dimensions
# Make sure the integer widths are bigger than the floats to avoid
# making 1px wide slices at the edges
sliceWidth = int(math.ceil(float(imageWidth) / slices))
sliceHeight = int(math.ceil(float(imageHeight) / slices))
percent = 100.0 / slices
html_file = open(os.path.join(HTML_DIR, out_name + '.html'), 'w+')
html_file.write('''
<style>
.dicedimage {
padding: 0; margin: 0; border-width: 0;
height: 100%%; width: 100%%;
}
.dicedimage-row {
width: %(imageWidth)spx; height: %(sliceHeight)spx;
padding: 0; margin: 0; border-width: 0;
}
.dicedimage img {
display: inline;
padding: 0; margin: 0; border-width: 0;
}
</style>
<div class="dicedimage">
''' % locals())
left = 0 # Set the left-most edge
upper = 0 # Set the top-most edge
while (upper < imageHeight):
html_file.write('<div class="dicedimage-row"><!--\n')
while (left < imageWidth):
# If the bottom and right of the cropping box overruns the image.
if (upper + sliceHeight > imageHeight and \
left + sliceWidth > imageWidth):
bbox = (left, upper, imageWidth, imageHeight)
# If the right of the cropping box overruns the image
elif (left + sliceWidth > imageWidth):
bbox = (left, upper, imageWidth, upper + sliceHeight)
# If the bottom of the cropping box overruns the image
elif (upper + sliceHeight > imageHeight):
bbox = (left, upper, left + sliceWidth, imageHeight)
# If the entire cropping box is inside the image,
# proceed normally.
else:
bbox = (left, upper, left + sliceWidth, upper + sliceHeight)
working_slice = img.crop(bbox) # Crop image based on created bounds
# Save your new cropped image.
dice_filename = '_'.join(['dice', str(upper), str(left)]) + out_ext
dice_path = os.path.join(imgdir, dice_filename)
working_slice.save(dice_path)
html_file.write(
'''
--><img class="dicedimage-piece" src="%s/%s"><!--\n
''' % (
diced_images_dir.split('/', 1)[1],
'/'.join([out_name, dice_filename])
)
)
left += sliceWidth # Increment the horizontal position
html_file.write('--></div>\n')
upper += sliceHeight # Increment the vertical position
left = 0
html_file.write('</div>')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("image_file", help="Path to an image file")
args = parser.parse_args()
image_path = args.image_file
try:
fileName, fileExtension = os.path.splitext(image_path.rsplit('/',1)[1])
except IndexError:
fileName, fileExtension = os.path.splitext(image_path)
diced_images_dir = os.path.join(HTML_DIR, '_'.join([fileName, 'pieces']))
if not os.path.exists(diced_images_dir):
os.makedirs(diced_images_dir)
dice(
image_path,
fileName,
fileExtension,
diced_images_dir,
10
)
print "Successfully diced %s" % image_path
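# Worked example of the slice geometry used above (standalone, hypothetical
# numbers): for a 1005px-wide image cut into 10 slices, ceil() rounds each
# slice up to 101px, and the last column is clamped to the image edge instead
# of leaving a 1px sliver past it.
import math
imageWidth, slices = 1005, 10
sliceWidth = int(math.ceil(float(imageWidth) / slices))
assert sliceWidth == 101
last_left = sliceWidth * (slices - 1)  # 909
assert min(last_left + sliceWidth, imageWidth) == imageWidth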
|
sernst/cauldron
|
cauldron/test/cli/commands/test_alias.py
|
Python
|
mit
| 2,067
| 0
|
import os
from cauldron.test import support
from cauldron.test.support import scaffolds
class TestAlias(scaffolds.ResultsTest):
"""..."""
def test_unknown_command(self):
"""Should fail if the command is not recognized."""
r = support.run_command('alias fake')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'UNKNOWN_COMMAND')
def test_list(self):
"""..."""
r = support.run_command('alias list')
self.assertFalse(r.failed, 'should not have failed')
def test_add(self):
"""..."""
p = self.get_temp_path('aliaser')
r = support.run_command('alias add test "{}" --temporary'.format(p))
self.assertFalse(r.failed, 'should not have failed')
def test_remove(self):
"""..."""
directory = self.get_temp_path('aliaser')
path = os.path.join(directory, 'test.text')
with open(path, 'w+') as f:
f.write('This is a test')
support.run_command('alias add test "{}" --temporary'.format(path))
r = support.run_command('alias remove test --temporary')
self.assertFalse(r.failed, 'should not have failed')
def test_empty(self):
"""..."""
r = support.run_command('alias add')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'MISSING_ARG')
def test_autocomplete_command(self):
"""..."""
result = support.autocomplete('alias ad')
self.assertEqual(len(result), 1)
self.assertEqual(result[0], 'add')
def test_autocomplete_alias(self):
"""..."""
result = support.autocomplete('alias add fake-alias-not-real')
self.assertEqual(len(result), 0)
def test_autocomplete_path(self):
"""..."""
path = os.path.dirname(os.path.realpath(__file__))
result = support.autocomplete('alias add test {}'.format(path))
self.assertIsNotNone(result)
|
watchdogpolska/feder
|
feder/alerts/filters.py
|
Python
|
mit
| 466
| 0
|
import django_filters
from dal import autocomplete
from .models import Alert
class AlertFilter(django_filters.FilterSet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filters["reason"].lookup_expr = "icontains"
self.filters["author"].widget = autocomplete.ModelSelect2(
url="users:autocomplete"
)
class Meta:
model = Alert
fields = ["reason", "author", "status"]
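# Minimal usage sketch (standard django-filter flow, not taken from this
# repo): a view would typically bind the filterset to the query string.
# filterset = AlertFilter(request.GET, queryset=Alert.objects.all())
# filtered_alerts = filterset.qs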
|
9929105/KEEP
|
keep_backend/repos/migrations/0002_auto__add_field_repository_study.py
|
Python
|
mit
| 8,118
| 0.007761
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Repository.study'
db.add_column(u'repos_repository', 'study',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='repositories', null=True, to=orm['studies.Study']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Repository.study'
db.delete_column(u'repos_repository', 'study_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'organizations.organization': {
'Meta': {'ordering': "['name']", 'object_name': 'Organization', '_ormbases': [u'auth.Group']},
'gravatar': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'organization_users'", 'symmetrical': 'False', 'through': u"orm['organizations.OrganizationUser']", 'to': u"orm['auth.User']"})
},
u'organizations.organizationuser': {
'Meta': {'ordering': "['organization', 'user']", 'unique_together': "(('user', 'organization'),)", 'object_name': 'OrganizationUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organization_user'", 'to': u"orm['organizations.Organization']"}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organization_user'", 'to': u"orm['auth.User']"})
},
u'repos.relationship': {
'Meta': {'ordering': "['name']", 'object_name': 'Relationship'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'repo_child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_relations'", 'to': u"orm['repos.Repository']"}),
'repo_parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_relations'", 'to': u"orm['repos.Repository']"})
},
u'repos.repository': {
'Meta': {'ordering': "['org', 'name']", 'object_name': 'Repository'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mongo_id': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'org': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'repositories'", 'null': 'True', 'to': u"orm['organizations.Organization']"}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'repositories'", 'null': 'True', 'to': u"orm['studies.Study']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'repositories'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'studies.study': {
'Meta': {'ordering': "['name']", 'object_name': 'Study'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'org': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'studies'", 'null': 'True', 'to': u"orm['organizations.Organization']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'studies'", 'null': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['repos']
|
jminardi/syncnet
|
syncnet/main.py
|
Python
|
mit
| 8,094
| 0.001483
|
import os
import sys
import threading
import SimpleHTTPServer
import SocketServer
import shutil
import enaml
from enaml.qt.qt_application import QtApplication
from PyQt4.QtCore import QFileSystemWatcher
from atom.api import Atom, Unicode, observe, Typed, Property, Int
from btsync import BTSync
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
# Directory where all synced sites will be stored. Each site will be synced to
# a directory whose name is the secret.
STORAGE_PATH = os.path.join(
os.path.expanduser('~'),
os.path.join('Documents','Syncnet','synced_secrets')
)
class SyncNet(Atom):
# The text currently entered in the secret field.
address = Unicode()
# The currently loaded secret.
current_secret = Unicode()
# A list of all locally synced secrets.
known_secrets = Property()
# Instance of the BTSync API wrapper.
btsync = Typed(BTSync, ())
# The QUrl object referencing the currently displayed resource. It must be
# replaced wholesale for the UI to react.
url = Unicode()
# Root path where all synced site directories are added.
storage_path = Unicode()
# The filesystem watcher that monitors all currently synced site
# directories.
_watcher = Typed(QFileSystemWatcher)
# This thread runs the simple http server.
_server_thread = Typed(threading.Thread)
# The simple http server's port
http_port = Int()
### Public Interface #####################################################
def init_secret(self, secret):
""" Creates a new directory at `self.storage_path` and adds it to the
BTSync object to be synced with the given `secret`. The secret is
assumed valid.
Parameters
----------
secret : str
BTSync secret string referencing a directory of html files. This
secret is assumed to already exist on the network.
Notes
-----
The newly created folder's name will be the given `secret`.
"""
path = os.path.join(self.storage_path, secret)
if not os.path.exists(path):
os.mkdir(path)
else:
msg = 'init_secret called with existing secret: {}'.format(path)
logger.debug(msg)
self._watcher.addPath(path)
self.btsync.add_folder(path, secret)
logger.debug('Directory added to BTSync: {}'.format(path))
def load_secret(self, secret):
""" Display the HTML files referenced by the given secret in the View.
If the secret is not synced locally, it will be initialized and synced.
Parameters
----------
secret : str
BTSync secret string
Raises
------
RuntimeError if `secret` is invalid
"""
secret = secret.upper()
if not self.is_valid_secret(secret):
msg = 'Attempted to load invalid secret: {}'.format(secret)
raise RuntimeError(msg)
if secret not in self.known_secrets:
self.init_secret(secret)
# Store the currently loaded secret so its directory can be monitored.
self.current_secret = secret
# Ensure the HTTP server is running before the url is set.
if self._server_thread is None:
logger.debug('Creating server thread')
self._server_thread = self._create_server_thread()
url = 'http://localhost:{}/{}'.format(self.http_port, secret)
self.url = '' # FIXME hack to get the webview to reload
self.url = url
logger.debug('URL set to: {}'.format(url))
def is_valid_secret(self, secret):
""" True if the given `secret` is a valid btsync secret string. A
valid secret is a 160 bit base32 encoded string with an 'A' or 'B'
prepended.
"""
if not (secret.startswith('A') or secret.startswith('B')):
return False
if len(secret) != 33:
return False
if not secret.isupper():
return False
# ensure only legal chars as defined by RFC 4648
for char in ('1', '8', '9', '='):
if char in secret:
return False
return True
### Observers ############################################################
@observe('address')
def _address_changed(self, change):
""" Check the text entered into the address field to see if it contains
a valid secret. If so, attempt to load that secret.
"""
address = self.address.upper()
if self.is_valid_secret(address):
self.load_secret(address)
def on_directory_changed(self, dirname):
""" Slot connected to the `QFileSystemWatcher.directoryChanged` Signal.
"""
# If the directory containing the currently loaded secret changes, it
# is reloaded.
_, secret = os.path.split(os.path.normpath(dirname))
if secret == self.current_secret:
self.load_secret(secret)
def on_link_clicked(self, url):
""" Slot connected to the `QWebView.linkClicked` Signal.
"""
self._update_address_bar(url)
if url.scheme() == 'sync':
secret = url.host().upper()
if self.is_valid_secret(secret):
self.load_secret(secret)
else:
msg = 'Attempted to load invalid secret: {}'
logger.debug(msg.format(url.toString()))
else:
self.url = url.toString()
def on_url_changed(self, url):
""" Slot connected to the `QWebView.urlChanged` Signal.
"""
self._update_address_bar(url)
### Default methods ######################################################
def _default__watcher(self):
_watcher = QFileSystemWatcher()
_watcher.directoryChanged.connect(self.on_directory_changed)
return _watcher
def _default_storage_path(self):
storage_path = STORAGE_PATH
if not os.path.exists(storage_path):
os.makedirs(storage_path)
logger.debug('Creating storage path: {}'.format(storage_path))
return storage_path
### Property getters #####################################################
def _get_known_secrets(self):
""" List of all locally synced secrets. Getter for known_secrets.
"""
directories = os.listdir(self.storage_path)
secrets = [x['secret'] for x in self.btsync.get_folders()]
tracked_directories = filter((lambda x:x in secrets), directories)
return tracked_directories
### Private Interface ####################################################
def _create_server_thread(self):
os.chdir(self.storage_path)
handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(('localhost', 0), handler)
_, port = httpd.server_address
self.http_port = port
logger.debug('Serving on port #{}'.format(port))
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True # don't hang on exit
t.start()
return t
def _update_address_bar(self, url):
"""
Parameters
----------
url : QUrl
The currently displayed url
"""
if url.host() == 'localhost':
self.address = url.path()[1:]
elif url.scheme() == 'sync':
self.address = url.host().upper()
else:
self.address = url.toString()
if __name__ == '__main__':
with enaml.imports():
from syncnet_view import SyncNetView
syncnet = SyncNet()
if getattr(sys, 'frozen', False):
HERE = os.path.dirname(sys.executable)
btsync_path = os.path.join(
HERE, 'BitTorrent\ Sync.app/Contents/MacOS/BitTorrent\ Sync')
syncnet.btsync.btsync_path = btsync_path
syncnet.btsync.start()
app = QtApplication()
view = SyncNetView(model=syncnet)
view.show()
app.start()
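# Standalone check of the secret validation rules implemented above
# (hypothetical secrets -- real BTSync secrets are 160-bit base32 strings):
def _looks_like_secret(secret):
    return (secret[:1] in ('A', 'B') and len(secret) == 33
            and secret.isupper()
            and not any(c in secret for c in ('1', '8', '9', '=')))
assert _looks_like_secret('A' + 'B' * 32)      # well-formed
assert not _looks_like_secret('a' + 'b' * 32)  # lowercase rejected
assert not _looks_like_secret('A' + 'B' * 30)  # wrong length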
|
baidu/broc
|
dependency/BrocConfig.py
|
Python
|
apache-2.0
| 5,186
| 0.003471
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Description : manage config file $HOME/.broc.rc
Authors : zhousongsong(doublesongsong@gmail.com)
Date : 2015-09-18 10:28:23
"""
import os
import sys
import ConfigParser
broc_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path.insert(0, broc_dir)
from dependency import BrocModule_pb2
class BrocConfigError(Exception):
"""
"""
def __init__(self, msg):
"""
Args:
msg : the error msg
"""
self._msg = msg
def __str__(self):
"""
"""
return self._msg
class BrocConfig(object):
"""
this class manages the .broc.rc in $HOME
"""
class __impl(object):
"""Implementation of singleton interface"""
def __init__(self):
"""
"""
self._file = os.path.join(os.environ['HOME'], '.broc.rc')
self._svn_repo_domain = 'https://svn.github.com'
self._git_repo_domain = 'https://github.com'
self._svn_postfix_branch = "BRANCH"
self._svn_postfix_tag = "PD_BL"
def __str__(self):
"""
"""
return "svn repo domain: %s\ngit repo domain: %s\n \
svn postfix branch: %s\nsvn postfix tag: %s"% (self._svn_repo_domain,
self._git_repo_domain,
self._svn_postfix_branch,
self._svn_postfix_tag)
def Id(self):
"""
test method, return singleton id
"""
return id(self)
def load(self):
"""
load broc configurations
Raise:
if load config failed, raise BrocConfigError
"""
try:
# if the configuration file does not exist in $HOME, create one
if not os.path.isfile(self._file):
cfgfile = open(self._file, 'w')
conf = ConfigParser.ConfigParser()
conf.add_section('repo')
conf.set('repo', 'svn_repo_domain', self._svn_repo_domain)
conf.set('repo', 'git_repo_domain', self._git_repo_domain)
conf.set('repo', 'svn_postfix_branch', 'BRANCH')
conf.set('repo', 'svn_postfix_tag', 'PD_BL')
conf.write(cfgfile)
cfgfile.close()
else:
cfgfile = open(self._file, 'r')
conf = ConfigParser.ConfigParser()
conf.read(self._file)
self._svn_repo_domain = conf.get('repo', 'svn_repo_domain')
self._git_repo_domain = conf.get('repo', 'git_repo_domain')
self._svn_postfix_branch = conf.get('repo', 'svn_postfix_branch')
self._svn_postfix_tag = conf.get('repo', 'svn_postfix_tag')
except ConfigParser.Error as e:
raise BrocConfigError(str(e))
def RepoDomain(self, repo_type):
"""
return repository domain
Args:
repo_type : BrocMode_pb2.Module.EnumRepo
"""
if repo_type == BrocModule_pb2.Module.SVN:
return self._svn_repo_domain
elif repo_type == BrocModule_pb2.Module.GIT:
return self._git_repo_domain
def SVNPostfixBranch(self):
"""
return postfix of svn branch
"""
return self._svn_postfix_branch
def SVNPostfixTag(self):
"""
return postfix of svn tag
"""
return self._svn_postfix_tag
def Dump(self):
"""
dump broc config
"""
print("-- svn domain : %s" % self._svn_repo_domain)
print("-- git domain : %s" % self._git_repo_domain)
print("-- svn branch postfix : %s" % self._svn_postfix_branch)
print("-- svn tag postfix : %s" % self._svn_postfix_tag)
# class BrocConfig
__instance = None
def __init__(self):
""" Create singleton instance """
# Check whether we already have an instance
if BrocConfig.__instance is None:
# Create and remember instance
BrocConfig.__instance = BrocConfig.__impl()
BrocConfig.__instance.load()
# Store instance reference as the only member in the handle
self.__dict__['_BrocConfig__instance'] = BrocConfig.__instance
def __getattr__(self, attr):
""" Delegate access to implementation """
return getattr(self.__instance, attr)
def __setattr__(self, attr, value):
""" Delegate access to implementation """
return setattr(self.__instance, attr, value)
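# BrocConfig uses the classic "singleton by delegation" recipe: the private
# __impl holds the real state and every handle forwards attribute access to
# one shared instance. A stripped-down sketch of the same pattern:
class _SingletonSketch(object):
    class __impl(object):
        def whoami(self):
            return id(self)
    __instance = None
    def __init__(self):
        if _SingletonSketch.__instance is None:
            _SingletonSketch.__instance = _SingletonSketch.__impl()
        self.__dict__['_SingletonSketch__instance'] = _SingletonSketch.__instance
    def __getattr__(self, attr):
        return getattr(self.__instance, attr)
assert _SingletonSketch().whoami() == _SingletonSketch().whoami()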
|
dedupeio/dedupe-examples
|
patent_example/patent_evaluation.py
|
Python
|
mit
| 1,311
| 0.006102
|
import csv
import collections
import itertools
def evaluateDuplicates(found_dupes, true_dupes):
true_positives = found_dupes.intersection(true_dupes)
false_positives = found_dupes.difference(true_dupes)
uncovered_dupes = true_dupes.difference(found_dupes)
print('found duplicate')
print(len(found_dupes))
print(len(true_dupes))
print('precision')
print(1 - len(false_positives) / float(len(found_dupes)))
print('recall')
print(len(true_positives) / float(len(true_dupes)))
def dupePairs(filename, colname) :
dupe_d = collections.defaultdict(list)
with open(filename) as f:
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
dupe_d[row[colname]].append(row['person_id'])
if 'x' in dupe_d :
del dupe_d['x']
dupe_s = set([])
for (unique_id, cluster) in dupe_d.items():
if len(cluster) > 1:
for pair in itertools.combinations(cluster, 2):
dupe_s.add(frozenset(pair))
return dupe_s
dedupe_clusters = 'patstat_output.csv'
manual_clusters = 'patstat_reference.csv'
test_dupes = dupePairs(dedupe_clusters, 'Cluster ID')
true_dupes = dupePairs(manual_clusters, 'leuven_id')
evaluateDuplicates(test_dupes, true_dupes)
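# Illustration of the pair expansion performed by dupePairs (toy cluster,
# not real data): a cluster of three ids yields all three unordered pairs.
toy_cluster = ['p1', 'p2', 'p3']
toy_pairs = set(frozenset(p) for p in itertools.combinations(toy_cluster, 2))
assert toy_pairs == set([frozenset(['p1', 'p2']),
                         frozenset(['p1', 'p3']),
                         frozenset(['p2', 'p3'])])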
|
pipermerriam/flex
|
flex/loading/schema/paths/path_item/__init__.py
|
Python
|
mit
| 1,084
| 0
|
from flex.datastructures import (
ValidationDict,
)
from flex.constants import (
OBJECT,
)
from flex.validation.common import (
generate_object_validator,
)
from .operation import (
operation_validator,
)
from .parameters import (
parameters_validator,
)
path_item_schema = {
'type': OBJECT,
}
non_field_validators = ValidationDict()
non_field_validators.add_property_validator('get', operation_validator)
non_field_validators.add_property_validator('put', operation_validator)
non_field_validators.add_property_validator('post', operation_validator)
non_field_validators.add_property_validator('delete', operation_validator)
non_field_validators.add_property_validator('options', operation_validator)
non_field_validators.add_property_validator('head', operation_validator)
non_field_validators.add_property_validator('patch', operation_validator)
non_field_validators.add_property_validator('parameters', parameters_validator)
path_item_validator = generate_object_validator(
schema=path_item_schema,
non_field_validators=non_field_validators,
)
|
thydeyx/LeetCode-Python
|
Palindrome Number.py
|
Python
|
mit
| 302
| 0.023179
|
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
s=str(x)
l=list(s)
l.reverse()
sq=''
s1=sq.join(l)
if s==s1:
return True
else:
return False
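# An equivalent, more idiomatic sketch (same semantics, including rejecting
# negatives, via slice reversal):
def is_palindrome(x):
    s = str(x)
    return s == s[::-1]
assert is_palindrome(121) and not is_palindrome(-121)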
|
aronparsons/spacewalk
|
client/rhel/rhn-client-tools/src/firstboot-legacy-rhel5/rhn_provide_certificate_gui.py
|
Python
|
gpl-2.0
| 2,411
| 0.004977
|
# Copyright 2006 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors:
# Daniel Benamy <dbenamy@redhat.com>
import os
import sys
sys.path.append("/usr/share/rhn/up2date_client/")
sys.path.append("/usr/share/rhn")
import rhnreg
import rhnregGui
import up2dateErrors
from rhn_register_firstboot_gui_window import RhnRegisterFirstbootGuiWindow
import gtk
from gtk import glade
import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
_ = t.ugettext
gtk.glade.bindtextdomain("rhn-client-tools")
class RhnProvideCertificateWindow(RhnRegisterFirstbootGuiWindow, rhnregGui.ProvideCertificatePage):
runPriority=107
moduleName = _("Provide Certificate")
windowTitle = moduleName
shortMessage = _("Provide a certificate for this Red Hat Satellite server")
needsparent = 1
needsnetwork = 1
noSidebar = True
def __init__(self):
RhnRegisterFirstbootGuiWindow.__init__(self)
rhnregGui.ProvideCertificatePage.__init__(self)
if rhnreg.registered():
self.skipme = True
def _getVbox(self):
return self.provideCertificatePageVbox()
def apply(self, *args):
"""Returns True to change the page or None to stay on the same page."""
status = self.provideCertificatePageApply()
if status == 0: # cert was installed
return True
elif status == 1: # the user doesn't want to provide a cert right now
# TODO write a message to disk like the other cases? need to decide
# how we want to do error handling in general.
self.parent.setPage("rhn_finish_gui")
return True
else: # an error occurred and the user was notified
assert status == 2
return None
childWindow = RhnProvideCertificateWindow
|
tsl143/zamboni
|
mkt/constants/base.py
|
Python
|
bsd-3-clause
| 10,226
| 0
|
from tower import ugettext_lazy as _
# Add-on and File statuses.
STATUS_NULL = 0
STATUS_PENDING = 2
STATUS_PUBLIC = 4
STATUS_DISABLED = 5
STATUS_DELETED = 11
STATUS_REJECTED = 12
STATUS_APPROVED = 13
STATUS_BLOCKED = 15
STATUS_UNLISTED = 16
# AMO-only statuses. Kept here only for memory and to not re-use the IDs.
_STATUS_UNREVIEWED = 1
_STATUS_NOMINATED = 3
_STATUS_LISTED = 6 # See bug 616242.
_STATUS_BETA = 7
_STATUS_LITE = 8
_STATUS_LITE_AND_NOMINATED = 9
_STATUS_PURGATORY = 10 # A temporary home; bug 614686
_STATUS_REVIEW_PENDING = 14 # Themes queue, reviewed, needs further action.
STATUS_CHOICES = {
STATUS_NULL: _(u'Incomplete'),
STATUS_PENDING: _(u'Pending approval'),
STATUS_PUBLIC: _(u'Published'),
STATUS_DISABLED: _(u'Banned from Marketplace'),
STATUS_DELETED: _(u'Deleted'),
STATUS_REJECTED: _(u'Rejected'),
# Approved, but the developer decides when to make it public.
# They need to go to the Marketplace and actually make it public.
STATUS_APPROVED: _(u'Approved but private'),
STATUS_BLOCKED: _(u'Blocked'),
STATUS_UNLISTED: _(u'Unlisted'),
}
# Marketplace file status terms.
MKT_STATUS_FILE_CHOICES = STATUS_CHOICES.copy()
MKT_STATUS_FILE_CHOICES[STATUS_DISABLED] = _(u'Obsolete')
MKT_STATUS_FILE_CHOICES[STATUS_APPROVED] = _(u'Approved')
MKT_STATUS_FILE_CHOICES[STATUS_PUBLIC] = _(u'Published')
# We need to expose nice values that aren't localisable.
STATUS_CHOICES_API = {
STATUS_NULL: 'incomplete',
STATUS_PENDING: 'pending',
STATUS_PUBLIC: 'public',
STATUS_DISABLED: 'disabled', # TODO: Change to 'banned' for API v2.
STATUS_DELETED: 'deleted',
STATUS_REJECTED: 'rejected',
STATUS_APPROVED: 'waiting', # TODO: Change to 'private' for API v2.
STATUS_BLOCKED: 'blocked',
STATUS_UNLISTED: 'unlisted',
}
STATUS_CHOICES_API_LOOKUP = {
'incomplete': STATUS_NULL,
'pending': STATUS_PENDING,
'public': STATUS_PUBLIC,
'disabled': STATUS_DISABLED, # TODO: Change to 'banned' for API v2.
'deleted': STATUS_DELETED,
'rejected': STATUS_REJECTED,
'waiting': STATUS_APPROVED, # TODO: Change to 'private' for API v2.
'blocked': STATUS_BLOCKED,
'unlisted': STATUS_UNLISTED,
}
STATUS_CHOICES_API_v2 = {
STATUS_NULL: 'incomplete',
STATUS_PENDING: 'pending',
STATUS_PUBLIC: 'public',
STATUS_DISABLED: 'banned',
STATUS_DELETED: 'deleted',
STATUS_REJECTED: 'rejected',
STATUS_APPROVED: 'private',
STATUS_BLOCKED: 'blocked',
STATUS_UNLISTED: 'unlisted',
}
STATUS_CHOICES_API_LOOKUP_v2 = {
'incomplete': STATUS_NULL,
'pending': STATUS_PENDING,
'public': STATUS_PUBLIC,
'banned': STATUS_DISABLED,
'deleted': STATUS_DELETED,
'rejected': STATUS_REJECTED,
'private': STATUS_APPROVED,
'blocked': STATUS_BLOCKED,
'unlisted': STATUS_UNLISTED,
}
# Publishing types.
PUBLISH_IMMEDIATE = 0
PUBLISH_HIDDEN = 1
PUBLISH_PRIVATE = 2
REVIEWED_STATUSES = (STATUS_PUBLIC, STATUS_APPROVED, STATUS_UNLISTED)
UNREVIEWED_STATUSES = (STATUS_PENDING,)
VALID_STATUSES = (STATUS_PENDING, STATUS_PUBLIC, STATUS_UNLISTED,
STATUS_APPROVED)
# LISTED_STATUSES are statuses that should return a 200 on the app detail page
# for anonymous users.
LISTED_STATUSES = (STATUS_PUBLIC, STATUS_UNLISTED)
# An add-on in one of these statuses can become premium.
PREMIUM_STATUSES = (STATUS_NULL, STATUS_PENDING)
# Newly submitted apps begin life at this status.
WEBAPPS_UNREVIEWED_STATUS = STATUS_PENDING
# These apps have been approved and are listed; or could be without further
# review.
WEBAPPS_APPROVED_STATUSES = (STATUS_PUBLIC, STATUS_UNLISTED, STATUS_APPROVED)
# An app with this status makes its detail page "invisible".
WEBAPPS_UNLISTED_STATUSES = (STATUS_DISABLED, STATUS_PENDING, STATUS_APPROVED,
STATUS_REJECTED)
# These apps shouldn't be considered anymore in mass-emailing etc.
WEBAPPS_EXCLUDED_STATUSES = (STATUS_DISABLED, STATUS_DELETED, STATUS_REJECTED)
# Add-on author roles.
AUTHOR_ROLE_VIEWER = 1
AUTHOR_ROLE_DEV = 4
AUTHOR_ROLE_OWNER = 5
AUTHOR_ROLE_SUPPORT = 6
AUTHOR_CHOICES = (
(AUTHOR_ROLE_OWNER, _(u'Owner')),
(AUTHOR_ROLE_DEV, _(u'Developer')),
(AUTHOR_ROLE_VIEWER, _(u'Viewer')),
(AUTHOR_ROLE_SUPPORT, _(u'Support')),
)
AUTHOR_CHOICES_NAMES = dict(AUTHOR_CHOICES)
# ADDON_WEBAPP Types
ADDON_WEBAPP_HOSTED = 1
ADDON_WEBAPP_PACKAGED = 2
ADDON_WEBAPP_PRIVILEGED = 3
ADDON_WEBAPP_TYPES = {
ADDON_WEBAPP_HOSTED: 'hosted',
ADDON_WEBAPP_PACKAGED: 'packaged',
ADDON_WEBAPP_PRIVILEGED: 'privileged',
}
ADDON_WEBAPP_TYPES_LOOKUP = dict((v, k) for k, v in ADDON_WEBAPP_TYPES.items())
ADDON_FREE = 0
ADDON_PREMIUM = 1
ADDON_PREMIUM_INAPP = 2
ADDON_FREE_INAPP = 3
# The addon will have payments, but they aren't using our payment system.
ADDON_OTHER_INAPP = 4
ADDON_PREMIUM_TYPES = {
ADDON_FREE: _('Free'),
ADDON_PREMIUM: _('Premium'),
ADDON_PREMIUM_INAPP: _('Premium with in-app payments'),
ADDON_FREE_INAPP: _('Free with in-app payments'),
ADDON_OTHER_INAPP: _("I'll use my own system for in-app payments")
}
# Non-locale versions for the API.
ADDON_PREMIUM_API = {
ADDON_FREE: 'free',
ADDON_PREMIUM: 'premium',
ADDON_PREMIUM_INAPP: 'premium-inapp',
ADDON_FREE_INAPP: 'free-inapp',
ADDON_OTHER_INAPP: 'other',
}
ADDON_PREMIUM_API_LOOKUP = dict((v, k) for k, v in ADDON_PREMIUM_API.items())
# Apps that require some sort of payment prior to installing.
ADDON_PREMIUMS = (ADDON_PREMIUM, ADDON_PREMIUM_INAPP)
# Apps that do *not* require a payment prior to installing.
ADDON_FREES = (ADDON_FREE, ADDON_FREE_INAPP, ADDON_OTHER_INAPP)
ADDON_INAPPS = (ADDON_PREMIUM_INAPP, ADDON_FREE_INAPP)
ADDON_HAS_PAYMENTS = (ADDON_FREE_INAPP, ADDON_PREMIUM, ADDON_PREMIUM_INAPP)
# Edit addon information
MAX_TAGS = 20
MIN_TAG_LENGTH = 2
MAX_CATEGORIES = 2
# Icon sizes we want to generate and expose in the API.
CONTENT_ICON_SIZES = [32, 48, 64, 128]
# Promo img sizes we want to generate and expose in the API.
PROMO_IMG_SIZES = [320, 640, 1050]
PROMO_IMG_MINIMUMS = (1050, 300)
# Preview upload sizes [thumb, full]
ADDON_PREVIEW_SIZES = [(200, 150), (700, 525)]
# Accepted image MIME-types
IMG_TYPES = ('image/png', 'image/jpeg', 'image/jpg')
VIDEO_TYPES = ('video/webm',)
# Editor Tools
EDITOR_VIEWING_INTERVAL = 8 # How often we ping for "who's watching?"
# For use in urls.
ADDON_UUID = r'(?P<uuid>[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12})'
APP_SLUG = r"""(?P<app_slug>[^/<>"']+)"""
# Reviewer Incentive Scores.
# Note: Don't change these since they're used as keys in the database.
REVIEWED_MANUAL = 0
REVIEWED_WEBAPP_HOSTED = 70
REVIEWED_WEBAPP_PACKAGED = 71
REVIEWED_WEBAPP_REREVIEW = 72
REVIEWED_WEBAPP_UPDATE = 73
REVIEWED_WEBAPP_PRIVILEGED = 74
REVIEWED_WEBAPP_PRIVILEGED_UPDATE = 75
REVIEWED_WEBAPP_PLATFORM_EXTRA = 76 # Not used as a key
REVIEWED_APP_REVIEW = 81
REVIEWED_APP_REVIEW_UNDO = 82
REVIEWED_WEBAPP_TARAKO = 90
REVIEWED_APP_ABUSE_REPORT = 100
REVIEWED_WEBSITE_ABUSE_REPORT = 101
REVIEWED_CHOICES = {
REVIEWED_MANUAL: _('Manual Reviewer Points'),
REVIEWED_WEBAPP_HOSTED: _('Web App Review'),
REVIEWED_WEBAPP_PACKAGED: _('Packaged App Review'),
REVIEWED_WEBAPP_PRIVILEGED: _('Privileged App Review'),
REVIEWED_WEBAPP_REREVIEW: _('Web App Re-review'),
REVIEWED_WEBAPP_UPDATE: _('Updated Packaged App Review'),
REVIEWED_WEBAPP_PRIVILEGED_UPDATE: _('Updated Privileged App Review'),
REVIEWED_APP_REVIEW: _('Moderated App Review'),
REVIEWED_APP_REVIEW_UNDO: _('App Review Moderation Reverted'),
REVIEWED_WEBAPP_TARAKO: _('Tarako App Review'),
REVIEWED_APP_ABUSE_REPORT: _('App Abuse Report Read'),
REVIEWED_WEBSITE_ABUSE_REPORT: _('Website Abuse Report Read'),
}
REVIEWED_SCORES = {
REVIEWED_MANUAL: 0,
REVIEWED_WEBAPP_HOSTED: 60,
REVIEWED_WEBAPP_PACKAGED: 60,
REVIEWED_WEBAPP_PRIVILEGED: 120,
REVIEWED_WEBAPP_REREVIEW: 30,
REVIEWED_WEBAPP_UPDATE: 40,
REVIEWED_WEBAPP_PRIVILEGED_UPDATE: 80,
REVIEWED_APP_REVIEW: 1,
REVIEWED_APP_REVIEW_UNDO: -1, # -REVIEWED_APP_REVIEW
REVIEWED_WEBAPP_TARAKO: 30,
REVIEWED_WEBAPP_PLATFORM_EXTRA: 10,
REVIEWED_APP
|
Widdershin/community-review-poster
|
autoposter/__init__.py
|
Python
|
mit
| 91
| 0.010989
|
from .app import App
from .reviews import Reviews
from .r_longboarding import RLongboarding
|
MacHu-GWU/single_file_module-project
|
sfm/obj_file_io.py
|
Python
|
mit
| 6,059
| 0.000332
|
# -*- coding: utf-8 -*-
"""
object file io is a framework for Python-object-to-single-file I/O. The word
'framework' means you can plug in any serialization/deserialization algorithm.
- dump: dump python object to a file.
- safe_dump: add atomic writing guarantee for ``dump``.
- load: load python object from a file.
Features:
1. ``compress``: built-in compress/decompress options.
2. ``overwrite``: an option to prevent from overwrite existing file.
3. ``verbose``: an optional built-in logger can display helpful information.
Usage:
suppose you have a dumper function, which takes a Python object as input
and returns a binary object::
import pickle
def dump(obj):
return pickle.dumps(obj)
def load(binary):
return pickle.loads(binary)
You just need to add a decorator, and the new function will do all the magic
for you (note that the decorators take the serializer type as an argument):
from obj_file_io import dump_func, safe_dump_func, load_func
@dump_func("binary")
def dump(obj):
return pickle.dumps(obj)
@safe_dump_func("binary")
def safe_dump(obj):
return pickle.dumps(obj)
@load_func("binary")
def load(binary):
return pickle.loads(binary)
**Chinese documentation (translated)**
object file io is a framework for I/O between a Python object and a single local file.
"""
import os
import time
import zlib
import logging
import inspect
from atomicwrites import atomic_write
# logging util
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
def prt_console(message, verbose):
"""Print message to console, if ``verbose`` is True.
"""
if verbose:
logger.info(message)
def _check_serializer_type(serializer_type):
if serializer_type not in ["binary", "str"]:
raise ValueError("serializer_type has to be one of 'binary' or 'str'!")
# dump, load
def _dump(obj, abspath, serializer_type,
dumper_func=None,
compress=True,
overwrite=False,
verbose=False,
**kwargs):
"""Dump object to file.
:param abspath: The file path you want dump to.
:type abspath: str
:param serializer_type: 'binary' or 'str'.
:type serializer_type: str
:param dumper_func: A dumper function that takes an object as input, return
binary or string.
:type dumper_func: callable function
:param compress: default ``True``. If True, then compress binary.
:type compress: bool
:param overwrite: default ``False``. If ``True``, dumping to an
existing file silently overwrites it. If ``False``, an alert
message is shown. The default ``False`` prevents overwriting a
file by mistake.
:type overwrite: boolean
:param verbose: default ``False``, help-message-display trigger.
:type verbose: boolean
"""
_check_serializer_type(serializer_type)
if not inspect.isfunction(dumper_func):
raise TypeError("dumper_func has to be a function that takes an object "
"as input and returns binary!")
prt_console("\nDump to '%s' ..." % abspath, verbose)
if os.path.exists(abspath):
if not overwrite:
prt_console(
" Stop! File exists and overwrite is not allowed",
verbose,
)
return
st = time.clock()
b_or_str = dumper_func(obj, **kwargs)
if serializer_type == "str":
b = b_or_str.encode("utf-8")
else:
b = b_or_str
if compress:
b = zlib.compress(b)
with atomic_write(abspath, overwrite=overwrite, mode="wb") as f:
f.write(b)
elapsed = time.clock() - st
prt_console(" Complete! Elapse %.6f sec." % elapsed, verbose)
if serializer_type == "str":
return b_or_str
else:
return b
def _load(abspath, serializer_type,
loader_func=None,
decompress=True,
verbose=False,
**kwargs):
"""load object from file.
:param abspath: The file path you want load from.
:type abspath: str
:param serializer_type: 'binary' or 'str'.
:type serializer_type: str
:param loader_func: A loader function that takes binary as input, return
an object.
:type loader_func: callable function
:param decompress: default ``True``. If True, then decompress binary.
:type decompress: bool
:param verbose: default ``False``, help-message-display trigger.
:type verbose: boolean
"""
_check_serializer_type(serializer_type)
if not inspect.isfunction(loader_func):
raise TypeError("loader_func has to be a function that takes binary "
"as input and returns an object!")
prt_console("\nLoad from '%s' ..." % abspath, verbose)
if not os.path.exists(abspath):
raise ValueError("'%s' doesn't exist." % abspath)
st = time.clock()
with open(abspath, "rb") as f:
b = f.read()
if decompress:
b = zlib.decompress(b)
if serializer_type == "str":
obj = loader_func(b.decode("utf-8"), **kwargs)
else:
obj = loader_func(b, **kwargs)
elapsed = time.clock() - st
prt_console(" Complete! Elapse %.6f sec." % elapsed, verbose)
return obj
def dump_func(serializer_type):
"""A decorator for ``_dump(dumper_func=dumper_func, **kwargs)``
"""
def outer_wrapper(dumper_func):
def wrapper(*args, **kwargs):
return _dump(
*args,
dumper_func=dumper_func, serializer_type=serializer_type,
**kwargs
)
return wrapper
return outer_wrapper
def load_func(serializer_type):
"""A decorator for ``_load(loader_func=loader_func, **kwargs)``
"""
def outer_wrapper(loader_func):
def wrapper(*args, **kwargs):
return _load(
*args,
loader_func=loader_func, serializer_type=serializer_type,
**kwargs
)
return wrapper
return outer_wrapper
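# Usage sketch for the decorators above, pairing them with pickle (this
# mirrors the pattern shown in the module docstring; paths are hypothetical):
import pickle
@dump_func("binary")
def dump_pickle(obj, **kwargs):
    return pickle.dumps(obj)
@load_func("binary")
def load_pickle(b, **kwargs):
    return pickle.loads(b)
# dump_pickle({'a': 1}, "/tmp/data.pickle", overwrite=True)
# obj = load_pickle("/tmp/data.pickle")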
|
grigorisg9gr/menpo
|
menpo/image/test/image_copy_test.py
|
Python
|
bsd-3-clause
| 1,372
| 0
|
import numpy as np
from menpo.image import Image, BooleanImage, MaskedImage
from menpo.shape import PointCloud
from menpo.testing import is_same_array
def test_image_copy():
pixels = np.ones([1, 10, 10])
landmarks = PointCloud(np.ones([3, 2]), copy=False)
im = Image(pixels, copy=False)
im.landmarks['test'] = landmarks
im_copy = im.copy()
assert (not is_same_array(im.pixels, im_copy.pixels))
assert (not is_same_array(im_copy.landmarks['test'].points,
im.landmarks['test'].points))
def test_booleanimage_copy():
pixels = np.ones([10, 10], dtype=np.bool)
landmarks = PointCloud(np.ones([3, 2]), copy=False)
im = BooleanImage(pixels, copy=False)
im.landmarks['test'] = landmarks
im_copy = im.copy()
assert (not is_same_array(im.pixels, im_copy.pixels))
assert (not is_same_array(im_copy.landmarks['test'].points,
im.landmarks['test'].points))
def test_maskedimage_copy():
pixels = np.ones([1, 10, 10])
landmarks = PointCloud(np.ones([3, 2]), copy=False)
im = MaskedImage(pixels, copy=False)
im.landmarks['test'] = landmarks
im_copy = im.copy()
assert (not is_same_array(im.pixels, im_copy.pixels))
assert (not is_same_array(im_copy.landmarks['test'].points,
im.landmarks['test'].points))
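# The three tests share one invariant: copy() must deep-copy both pixels and
# landmarks. A rough standalone analogue of the check, assuming numpy
# (is_same_array itself is menpo's own helper):
import numpy as np
a = np.ones((3, 3))
b = a.copy()
assert not np.shares_memory(a, b)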
|
siddhantgoel/tornado-sqlalchemy
|
examples/multiple_databases.py
|
Python
|
mit
| 2,508
| 0
|
from sqlalchemy import BigInteger, Column, String
from tornado.gen import coroutine
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
from tornado_sqlalchemy import (
SessionMixin,
as_future,
set_max_workers,
SQLAlchemy,
)
db = SQLAlchemy()
set_max_workers(10)
class User(db.Model):
__tablename__ = 'users'
id = Column(BigInteger, primary_key=True)
username = Column(String(255))
class Foo(db.Model):
__bind_key__ = 'foo'
__tablename__ = 'foo'
id = Column(BigInteger, primary_key=True)
foo = Column(String(255))
class Bar(db.Model):
__bind_key__ = 'bar'
__tablename__ = 'bar'
id = Column(BigInteger, primary_key=True)
bar = Column(String(255))
class SynchronousRequestHandler(SessionMixin, RequestHandler):
def get(self):
with self.make_session() as session:
count = session.query(User).count()
# OR count = self.session.query(User).count()
self.write('{} users so far!'.format(count))
class GenCoroutinesRequestHandler(SessionMixin, RequestHandler):
@coroutine
def get(self):
with self.make_session() as session:
session.add(User(username='b'))
session.add(Foo(foo='foo'))
session.add(Bar(bar='bar'))
session.commit()
count = yield as_future(session.query(User).count)
self.write('{} users so far!'.format(count))
class NativeCoroutinesRequestHandler(SessionMixin, RequestHandler):
async def get(self):
with self.make_session() as session:
session.add(User(username='c'))
session.add(Foo(foo='d'))
session.add(Bar(bar='e'))
session.commit()
count = await as_future(session.query(User).count)
self.write('{} users so far!'.format(count))
if __name__ == '__main__':
db.configure(
url='sqlite://',
binds={'foo': 'sqlite:///foo.db', 'bar': 'sqlite:///bar.db'},
)
app = Application(
[
(r'/sync', SynchronousRequestHandler),
(r'/gen-coroutines', GenCoroutinesRequestHandler),
(r'/native-coroutines', NativeCoroutinesRequestHandler),
],
db=db,
autoreload=True,
)
db.create_all()
session = db.sessionmaker()
session.add(User(id=1, username='a'))
session.commit()
session.close()
print('Listening on port 8888')
app.listen(8888)
IOLoop.current().start()
|
rschnapka/bank-payment
|
account_banking_uk_hsbc/account_banking_uk_hsbc.py
|
Python
|
agpl-3.0
| 5,362
| 0
|
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# Copyright (C) 2011 credativ Ltd (<http://www.credativ.co.uk>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date
from openerp.osv import orm, fields
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DATEFORMAT
class hsbc_export(orm.Model):
"""HSBC Export"""
_name = 'banking.export.hsbc'
_description = __doc__
_rec_name = 'execution_date'
_columns = {
'payment_order_ids': fields.many2many(
'payment.order',
'account_payment_order_hsbc_rel',
'banking_export_hsbc_id', 'account_order_id',
'Payment Orders',
readonly=True),
'identification':
fields.char('Identification', size=15, readonly=True, select=True),
'execution_date':
fields.date('Execution Date', readonly=True),
'no_transactions':
fields.integer('Number of Transactions', readonly=True),
'total_amount':
fields.float('Total Amount', readonly=True),
'date_generated':
fields.datetime('Generation Date', readonly=True, select=True),
'file':
fields.binary('HSBC File', readonly=True),
'state':
fields.selection([
('draft', 'Draft'),
('sent', 'Sent'),
('done', 'Reconciled'),
], 'State', readonly=True),
}
_defaults = {
'date_generated': lambda *a: date.today().strftime(OE_DATEFORMAT),
'state': 'draft',
}
class payment_line(orm.Model):
"""The standard payment order is using a mixture of details from the
partner record and the res.partner.bank record. For, instance, the account
holder name is coming from the res.partner.bank record, but the company
name and address are coming from the partner address record. This is
problematic because the HSBC payment format is validating for alphanumeric
characters in the company name and address. So, "Great Company Ltd." and
"Great Company s.a." will cause an error because they have full-stops in
the name.
A better approach is to use the name and address details from the
res.partner.bank record always. This way, the address details can be
sanitized for the payments, whilst being able to print the proper name and
address throughout the rest of the system e.g. on invoices.
"""
_inherit = 'payment.line'
def info_owner(self, cr, uid, ids, name=None, args=None, context=None):
if not ids:
return {}
result = {}
info = ''
for line in self.browse(cr, uid, ids, context=context):
owner = line.order_id.mode.bank_id
name = owner.owner_name or owner.partner_id.name
st = owner.street and owner.street or ''
st1 = '' # no street2 in res.partner.bank
zip = owner.zip and owner.zip or ''
city = owner.city and owner.city or ''
zip_city = zip + ' ' + city
cntry = owner.country_id and owner.country_id.name or ''
info = name + "\n".join((st + " ", st1, zip_city, cntry))
result[line.id] = info
return result
def info_partner(self, cr, uid, ids, name=None, args=None, context=None):
if not ids:
return {}
result = {}
info = ''
for line in self.browse(cr, uid, ids, context=context):
partner = line.bank_id
name = partner.owner_name or partner.partner_id.name
st = partner.street and partner.street or ''
st1 = '' # no street2 in res.partner.bank
zip = partner.zip and partner.zip or ''
city = partner.city and partner.city or ''
zip_city = zip + ' ' + city
cntry = partner.country_id and partner.country_id.name or ''
info = name + "\n".join((st + " ", st1, zip_city, cntry))
result[line.id] = info
return result
# Define the info_partner and info_owner so we can override the methods
_columns = {
'info_owner': fields.function(
info_owner,
string="Owner Account",
type="text",
help='Address of the Main Partner',
),
'info_partner': fields.function(
info_partner,
string="Destination Account",
type="t
|
ext",
help='Address of the Ordering Customer.'
),
}
|
jeff-allen-mongo/mut
|
mut/tuft/config.py
|
Python
|
apache-2.0
| 911
| 0
|
import importlib.util
class Config:
def __init__(self, path: str) -> None:
spec = importlib.util.spec_from_file_location('conf', path)
self.module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(self.module)
@property
def rst_epilog(self) -> str:
"""A string that is appended to the end of every rst file. Useful for
replacements. Defaults to an empty string."""
return str(self.get('rst_epilog', ''))
@property
def source_suffix(self) -> str:
"""The file extension used for source files. Defaults to ".txt"."""
return str(self.get('source_suffix', '.txt'))
    def get(self, key: str, default: object) -> object:
try:
return self[key]
except AttributeError:
return default
def __getitem__(self, key: str) -> object:
return getattr(self.module, key)
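# Usage sketch (the conf.py contents below are a made-up example, not from the
# original project): Config executes an arbitrary Python file and exposes its
# module-level names, falling back to defaults for missing keys.
if __name__ == '__main__':
    import os
    import tempfile
    with tempfile.TemporaryDirectory() as d:
        conf_path = os.path.join(d, 'conf.py')
        with open(conf_path, 'w') as f:
            f.write("rst_epilog = '.. |version| replace:: 1.0'\n")
        cfg = Config(conf_path)
        assert cfg.rst_epilog.startswith('.. |version|')
        assert cfg.source_suffix == '.txt'  # not set in conf.py -> default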
|
dhirajt/dtc
|
wsgi/dtcbusroutes/dtcbusroutes/wsgi.py
|
Python
|
gpl-3.0
| 1,146
| 0.000873
|
"""
WSGI config for dtcbusroutes project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dtcbusroutes.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
brakhane/panda3d
|
direct/src/interval/ProjectileIntervalTest.py
|
Python
|
bsd-3-clause
| 437
| 0.009153
|
"""Undocumented Module"""
__all__ = ['doTest']
from panda3d.core import *
from panda3d.direct import *
from .IntervalGlobal import *
def doTest():
smiley = loader.loadModel('models/misc/smiley')
smiley.reparentTo(render)
pi = ProjectileInterval(smiley, startPos=Point3(0, 0, 0),
endZ = -10, wayPoint=Point3(10, 0, 0),
timeToWayPoint=3)
pi.loop()
    return pi
|
thatcr/cffi-xll
|
src/xlcall/templates/__init__.py
|
Python
|
mit
| 47
| 0.021277
|
from .constants import *
from ._xlcall import *
|
collinjackson/mojo
|
mojo/devtools/common/devtoolslib/linux_shell.py
|
Python
|
bsd-3-clause
| 2,385
| 0.002935
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
from devtoolslib.shell import Shell
from devtoolslib import http_server
class LinuxShell(Shell):
"""Wrapper around Mojo shell running on Linux.
Args:
executable_path: path to the shell binary
command_prefix: optional list of arguments to prepend to the shell command,
allowing e.g. to run the shell under debugger.
"""
  def __init__(self, executable_path, command_prefix=None):
self.executable_path = executable_path
self.command_prefix = command_prefix if command_prefix else []
def ServeLocalDirectory(self, local_dir_path, port=0):
"""Serves the content of the local (host) directory, making it available to
the shell under the url returned by the function.
The server will run on a separate thread until the program terminates. The
call returns immediately.
Args:
local_dir_path: path to the directory to be served
      port: port at which the server will be available to the shell
Returns:
The url that the shell can use to access the content of |local_dir_path|.
"""
return 'http://%s:%d/' % http_server.StartHttpServer(local_dir_path, port)
def Run(self, arguments):
"""Runs the shell with given arguments until shell exits, passing the stdout
mingled with stderr produced by the shell onto the stdout.
Returns:
Exit code retured by the shell or None if the exit code cannot be
retrieved.
"""
command = self.command_prefix + [self.executable_path] + arguments
return subprocess.call(command, stderr=subprocess.STDOUT)
def RunAndGetOutput(self, arguments):
"""Runs the shell with given arguments until shell exits.
Args:
arguments: list of arguments for the shell
Returns:
A tuple of (return_code, output). |return_code| is the exit code returned
by the shell or None if the exit code cannot be retrieved. |output| is the
stdout mingled with the stderr produced by the shell.
"""
command = self.command_prefix + [self.executable_path] + arguments
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(output, _) = p.communicate()
return p.returncode, output
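# Hypothetical usage sketch (the paths are placeholders, not from the original
# module): serve a local directory over HTTP and run the shell against it,
# optionally under a debugger via command_prefix.
if __name__ == '__main__':
  shell = LinuxShell('/path/to/mojo_shell', command_prefix=['gdb', '--args'])
  url = shell.ServeLocalDirectory('/path/to/apps')
  exit_code = shell.Run([url + 'hello.mojo'])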
|
slogan621/tscharts
|
tschartslib/clinicstation/clinicstation.py
|
Python
|
apache-2.0
| 30,523
| 0.003407
|
#(C) Copyright Syd Logan 2017-2020
#(C) Copyright Thousand Smiles Foundation 2017-2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
unit tests for clinic station application. Assumes django server is up
and running on the specified host and port
'''
import unittest
import getopt, sys
import json
from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
from tschartslib.clinic.clinic import CreateClinic, DeleteClinic
from tschartslib.station.station import CreateStation, DeleteStation
from tschartslib.patient.patient import CreatePatient, GetPatient, DeletePatient
class CreateClinicStation(ServiceAPI):
def __init__(self, host, port, token, clinic, station, active=False, away=True, finished=False, name="", name_es="", level=None):
super(CreateClinicStation, self).__init__()
self.setHttpMethod("POST")
self.setHost(host)
self.setPort(port)
self.setToken(token)
payload = {"clinic": clinic, "away": away, "station": station, "active": active, "name": name, "finished": finished, "name_es": name_es, "level": level}
self.setPayload(payload)
self.setURL("tscharts/v1/clinicstation/")
class GetClinicStation(ServiceAPI):
def makeURL(self):
hasQArgs = False
if not self._id == None:
base = "tscharts/v1/clinicstation/{}/".format(self._id)
else:
base = "tscharts/v1/clinicstation/"
if not self._clinic == None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "clinic={}".format(self._clinic)
hasQArgs = True
if not self._active == None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "active={}".format(self._active)
hasQArgs = True
if not self._level == None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "level={}".format(self._level)
hasQArgs = True
if not self._away == None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "away={}".format(self._away)
hasQArgs = True
if not self._finished == None:
if not hasQArgs:
base += "?"
else:
base += "&"
base += "finished={}".format(self._finished)
hasQArgs = True
self.setURL(base)
def __init__(self, host, port, token, id=None):
super(GetClinicStation, self).__init__()
self.setHttpMethod("GET")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._id = None
self._away = None
self._active = None
self._finished = None
self._level = None
self._clinic = None
self.makeURL();
def setId(self, id):
self._id = id;
self.makeURL()
def setAway(self, away):
self._away = away
self.makeURL()
def setFinished(self, finished):
self._finished = finished
self.makeURL()
def setActive(self, active):
self._active = active
self.makeURL()
def setClinic(self, clinic):
self._clinic = clinic
self.makeURL()
def setLevel(self, level):
self._level = level
self.makeURL()
class UpdateClinicStation(ServiceAPI):
def __init__(self, host, port, token, id):
super(UpdateClinicStation, self).__init__()
self.setHttpMethod("PUT")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._payload = {}
self.setPayload(self._payload)
self.setURL("tscharts/v1/clinicstation/{}/".format(id))
def setAway(self, away):
self._payload["away"] = away
self.setPayload(self._payload)
def setFinished(self, finished):
self._payload["finished"] = finished
self.setPayload(self._payload)
def setActive(self, active):
self._payload["active"] = active
self.setPayload(self._payload)
def setName(self, name):
self._payload["name"] = name
self.setPayload(self._payload)
def setNameES(self, name):
self._payload["name_es"] = name
self.setPayload(self._payload)
def setLevel(self, level):
self._payload["level"] = level
self.setPayload(self._payload)
def setActivePatient(self, patient):
self._payload["activepatient"] = patient
self.setPayload(self._payload)
    def setNextPatient(self, patient):
self._payload["nextpatient"] = patient
self.setPayload(self._payload)
def setAwayTime(self, minutes):
self._payload["awaytime"] = minutes
        self.setPayload(self._payload)
class DeleteClinicStation(ServiceAPI):
def __init__(self, host, port, token, id):
super(DeleteClinicStation, self).__init__()
self.setHttpMethod("DELETE")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self.setURL("tscharts/v1/clinicstation/{}/".format(id))
class TestTSClinicStation(unittest.TestCase):
def setUp(self):
login = Login(host, port, username, password)
ret = login.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("token" in ret[1])
global token
token = ret[1]["token"]
def testCreateClinicStation(self):
x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
clinicid = int(ret[1]["id"])
x = CreateStation(host, port, token, "ENT")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
stationid = int(ret[1]["id"])
# default active and away state
x = CreateClinicStation(host, port, token, clinicid, stationid, name="test1")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetClinicStation(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("clinic" in ret[1])
clinicId = int(ret[1]["clinic"])
self.assertTrue(clinicId == clinicid)
self.assertTrue("station" in ret[1])
stationId = int(ret[1]["station"])
self.assertTrue(stationId == stationid)
self.assertTrue("active" in ret[1])
self.assertTrue(ret[1]["active"] == False)
self.assertTrue("finished" in ret[1])
self.assertTrue(ret[1]["finished"] == False)
self.assertTrue("away" in ret[1])
self.assertTrue(ret[1]["away"] == True)
self.assertTrue("name" in ret[1])
self.assertTrue(ret[1]["name"] == "test1")
x = DeleteClinicStation(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetClinicStation(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
# explicit active state
x = CreateClinicStation(host, port, token, clinicid, stationid, active=False, name="test2")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetClinicStation(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
        self.assertEqual(ret[0], 200)
|
prikevs/PasteSite
|
paste/database.py
|
Python
|
mit
| 173
| 0.00578
|
#coding:utf8
from flask.ext.sqlalchemy import SQLAlchemy
from . import app
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///relative/../../test.db'
db = SQLAlchemy(app)
|
DMPwerkzeug/DMPwerkzeug
|
rdmo/questions/migrations/0001_initial_after_reset.py
|
Python
|
apache-2.0
| 5,793
| 0.003798
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import rdmo.core.models
class Migration(migrations.Migration):
dependencies = [
('domain', '0001_initial_after_reset'),
]
operations = [
migrations.CreateModel(
name='Catalog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(verbose_name='created', editable=False)),
('updated', models.DateTimeField(verbose_name='updated', editable=False)),
('order', models.IntegerField(null=True)),
('title_en', models.CharField(max_length=256)),
('title_de', models.CharField(max_length=256)),
],
options={
'ordering': ('order',),
'verbose_name': 'Catalog',
'verbose_name_plural': 'Catalogs',
},
bases=(models.Model, rdmo.core.models.TranslationMixin),
),
migrations.CreateModel(
name='QuestionEntity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(verbose_name='created', editable=False)),
('updated', models.DateTimeField(verbose_name='updated', editable=False)),
('order', models.IntegerField(null=True)),
('help_en', models.TextField(null=True, blank=True)),
('help_de', models.TextField(null=True, blank=True)),
],
options={
'ordering': ('subsection__section__catalog__order', 'subsection__section__order', 'subsection__order'),
'verbose_name': 'QuestionEntity',
'verbose_name_plural': 'QuestionEntities',
},
bases=(models.Model, rdmo.core.models.TranslationMixin),
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(verbose_name='created', editable=False)),
('updated', models.DateTimeField(verbose_name='updated', editable=False)),
('order', models.IntegerField(null=True)),
('title_en', models.CharField(max_length=256)),
('title_de', models.CharField(max_length=256)),
('catalog', models.ForeignKey(related_name='sections', to='questions.Catalog', on_delete=models.CASCADE)),
],
options={
'ordering': ('catalog__order', 'order'),
'verbose_name': 'Section',
'verbose_name_plural': 'Sections',
},
bases=(models.Model, rdmo.core.models.TranslationMixin),
),
migrations.CreateModel(
name='Subsection',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(verbose_name='created', editable=False)),
('updated', models.DateTimeField(verbose_name='updated', editable=False)),
('order', models.IntegerField(null=True)),
                ('title_en', models.CharField(max_length=256)),
('title_de', models.CharField(max_length=256)),
('section', models.ForeignKey(related_name='subsections', to='questions.Section', on_delete=models.CASCADE)),
],
options={
'ordering': ('section__catalog__order', 'section__order', 'order'),
'verbose_name': 'Subsection',
'verbose_name_plural': 'Subsections',
},
bases=(models.Model, rdmo.core.models.TranslationMixin),
),
migrations.CreateModel(
name='Question',
fields=[
('questionentity_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='questions.QuestionEntity', on_delete=models.CASCADE)),
('text_en', models.TextField()),
('text_de', models.TextField()),
('widget_type', models.CharField(max_length=12, choices=[('text', 'Text'), ('textarea', 'Textarea'), ('yesno', 'Yes/No'), ('checkbox', 'Checkboxes'), ('radio', 'Radio buttons'), ('select', 'Select drop-down'), ('range', 'Range slider'), ('date', 'Date picker')])),
],
options={
'ordering': ('subsection__section__catalog__order', 'subsection__section__order', 'subsection__order'),
'verbose_name': 'Question',
'verbose_name_plural': 'Questions',
},
bases=('questions.questionentity',),
),
migrations.AddField(
model_name='questionentity',
name='attribute_entity',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='domain.AttributeEntity', null=True),
),
migrations.AddField(
model_name='questionentity',
name='subsection',
field=models.ForeignKey(related_name='entities', to='questions.Subsection', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='question',
name='parent_entity',
field=models.ForeignKey(related_name='questions', blank=True, to='questions.QuestionEntity', null=True, on_delete=models.CASCADE),
),
]
|
iammrdollar/ProjectEulerSolutions
|
Problem 1 - Multiples of 3 and 5/mul_3_5.py
|
Python
|
mit
| 296
| 0.016892
|
# Sum of numbers divisible by 3 or 5 below the given limit
def sumN(n):
return ((n * (n+1)) // 2)
def sumDivisibleBy(num, upto):
linearUpto = (upto-1)//num
return num * sumN(linearUpto)
upto = int(input())
ans = sumDivisibleBy(3,upto) + sumDivisibleBy(5,upto) - sumDivisibleBy(15,upto)
print(ans)
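# Worked example for the classic upto = 1000 instance (inclusion-exclusion:
# multiples of 15 are counted by both the 3- and 5-sums, hence subtracted):
#   sumDivisibleBy(3, 1000)  = 3  * sumN(333) = 3  * 55611 = 166833
#   sumDivisibleBy(5, 1000)  = 5  * sumN(199) = 5  * 19900 =  99500
#   sumDivisibleBy(15, 1000) = 15 * sumN(66)  = 15 * 2211  =  33165
#   ans = 166833 + 99500 - 33165 = 233168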
|
CanalTP/flask-restful
|
examples/todo.py
|
Python
|
bsd-3-clause
| 1,446
| 0.003458
|
from flask import Flask
from flask.ext.restful import reqparse, abort, Api, Resource
app = Flask(__name__)
api = Api(app)
TODOS = {
'todo1': {'task': 'build an API'},
'todo2': {'task': '?????'},
    'todo3': {'task': 'profit!'},
}
def abort_if_todo_doesnt_exist(todo_id):
if todo_id not in TODOS:
abort(404, message="Todo {} doesn't exist".format(todo_id))
parser = reqparse.RequestParser()
parser.add_argument('task', type=str)
# Todo
# show a single todo item and lets you delete them
class Todo(Resource):
def get(self, todo_id):
abort_if_todo_doesnt_exist(todo_id)
return TODOS[todo_id]
def delete(self, todo_id):
        abort_if_todo_doesnt_exist(todo_id)
del TODOS[todo_id]
return '', 204
def put(self, todo_id):
args = parser.parse_args()
task = {'task': args['task']}
TODOS[todo_id] = task
return task, 201
# TodoList
# shows a list of all todos, and lets you POST to add new tasks
class TodoList(Resource):
def get(self):
return TODOS
def post(self):
args = parser.parse_args()
todo_id = 'todo%d' % (len(TODOS) + 1)
TODOS[todo_id] = {'task': args['task']}
return TODOS[todo_id], 201
##
## Actually setup the Api resource routing here
##
api.add_resource(TodoList, '/todos')
api.add_resource(Todo, '/todos/<string:todo_id>')
if __name__ == '__main__':
app.run(debug=True)
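# Quick smoke test of the resource routing (a sketch, not part of the original
# example), using Flask's built-in test client instead of a live server:
def _smoke_test():
    client = app.test_client()
    assert client.get('/todos').status_code == 200
    created = client.post('/todos', data={'task': 'write docs'})
    assert created.status_code == 201
    assert client.delete('/todos/todo1').status_code == 204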
|
openstack/senlin
|
senlin/tests/unit/cmd/test_conductor.py
|
Python
|
apache-2.0
| 1,992
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
from senlin.cmd import conductor
from senlin.common import config
from senlin.common import consts
from senlin.common import messaging
from senlin.common import profiler
from senlin.conductor import service
from senlin.tests.unit.common import base
CONF = cfg.CONF
class TestConductor(base.SenlinTestCase):
def setUp(self):
super(TestConductor, self).setUp()
@mock.patch('oslo_log.log.setup')
@mock.patch('oslo_log.log.set_defaults')
@mock.patch('oslo_service.service.launch')
@mock.patch.object(config, 'parse_args')
@mock.patch.object(messaging, 'setup')
@mock.patch.object(profiler, 'setup')
@mock.patch.object(service, 'ConductorService')
def test_main(self, mock_service, mock_profiler_setup,
mock_messaging_setup, mock_parse_args, mock_launch,
mock_log_set_defaults, mock_log_setup):
conductor.main()
mock_parse_args.assert_called_once()
mock_log_setup.assert_called_once()
mock_log_set_defaults.assert_called_once()
mock_messaging_setup.assert_called_once()
mock_profiler_setup.assert_called_once()
mock_service.assert_called_once_with(
mock.ANY, consts.CONDUCTOR_TOPIC
)
mock_launch.assert_called_once_with(
mock.ANY, mock.ANY, workers=1, restart_method='mutate'
)
|
skosukhin/spack
|
var/spack/repos/builtin/packages/htop/package.py
|
Python
|
lgpl-2.1
| 1,706
| 0.000586
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Htop(AutotoolsPackage):
"""htop is an interactive text-mode process viewer for Unix systems."""
homepage = "https://github.com/hishamhm/htop"
url = "https://hisham.hm/htop/releases/2.0.2/htop-2.0.2.tar.gz"
list_url = "https://hisham.hm/htop/releases"
list_depth = 1
version('2.0.2', '7d354d904bad591a931ad57e99fea84a')
depends_on('ncurses')
def configure_args(self):
return ['--enable-shared']
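# Typical use of this recipe from the Spack CLI (a sketch):
#   spack install htop@2.0.2
#   spack load htop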
|
sbarakat/graph-partitioning
|
graph_partitioning/partitioners/patoh/patoh.py
|
Python
|
mit
| 5,305
| 0.005844
|
import ctypes
import graph_partitioning.partitioners.utils as putils
from graph_partitioning.partitioners.patoh.parameters import PATOHParameters
'''
Usage:
# Load the library
libPath = 'path/to/libpatoh.dylib' # .so for linux
lib = LibPatoh(libraryPath = libPath)
lib.load()
# Check library is Loaded
if lib.libIsLoaded() == False:
    raise Exception(...)
# Prepare the data for partitioning
G = nx.Graph()
... load data into G ...
fixedNodes = None
... if some of the nodes are already fixed ...
fixedNodes = [-1, -1, 0, -1, -1, 2, -1, ...]
data = PatohData()
data.fromNetworkxGraph(G, num_partitions = 4, partvec = fixedNodes)
# Perform partitioning
lib.initializeParameters(data, num_partitions)
if lib.checkUserParameters(data, verbose = True):
if lib.alloc(data) == True:
if lib.part(data) == True:
# do something with partition data...
# free memory
lib.free(data)
'''
class LibPatoh(putils.CLibInterface):
    def __init__(self, libraryPath = None):
        super().__init__(libraryPath=libraryPath)
def _getDefaultLibPath(self):
return putils.defaultPATOHLibraryPath()
def _loadLibraryFunctions(self):
self.PATOH_Version = self.clib.Patoh_VersionStr
self.PATOH_Version.restype = (ctypes.c_char_p)
self.PATOH_InitializeParameters = self.clib.Patoh_Initialize_Parameters
self.PATOH_InitializeParameters.argtypes = (ctypes.POINTER(PATOHParameters), ctypes.c_int, ctypes.c_int)
self.PATOH_checkUserParameters = self.clib.Patoh_Check_User_Parameters
self.PATOH_checkUserParameters.argtypes = (ctypes.POINTER(PATOHParameters), ctypes.c_int)
self.PATOH_Alloc = self.clib.Patoh_Alloc
self.PATOH_Alloc.argtypes = (ctypes.POINTER(PATOHParameters), ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
self.PATOH_Part = self.clib.Patoh_Part
self.PATOH_Part.argtypes = (ctypes.POINTER(PATOHParameters), ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
self.PATOH_Free = self.clib.Patoh_Free
self.cfree = self.clib.free
self.cfree.argtypes = (ctypes.c_void_p,)
def version(self):
return self.PATOH_Version().decode('utf-8')
def initializeParameters(self, patohData, num_partitions = 2):
if(isinstance(num_partitions, int) == False):
num_partitions = 2
patohData.params = PATOHParameters()
ok = self.PATOH_InitializeParameters(ctypes.byref(patohData.params), 1, 0)
if(ok == 0):
patohData.params._k = num_partitions
return True
else:
patohData.params = None
return False
def checkUserParameters(self, patohData, verbose = True):
if (isinstance(patohData.params, PATOHParameters) == False):
print('Cannot check parameters as params is not of type PATOHParameters')
return False
# check verbosity mode
v = 0
if verbose == True:
v = 1
# perform parameter check
ok = self.PATOH_checkUserParameters(ctypes.byref(patohData.params), v)
if(ok == 0):
#print('User Parameters Valid')
return True
else:
print('Error in the user parameters. Use verbose mode for greater details.')
return False
def alloc(self, patohData):
#if (isinstance(patohData, patdata.PatohData) == False):
# return False
#PPaToH_Parameters pargs, int _c, int _n, int _nconst, int *cwghts, int *nwghts, int *xpins, int *pins
ok = self.PATOH_Alloc(ctypes.byref(patohData.params), patohData._c, patohData._n, patohData._nconst, patohData._cwghts.ctypes, patohData._nwghts.ctypes, patohData._xpins.ctypes, patohData._pins.ctypes)
if (ok == 0):
return True
return False
def part(self, patohData):
'''
int PaToH_Part(PPaToH_Parameters pargs, int _c, int _n, int _nconst, int useFixCells,
int *cwghts, int *nwghts, int *xpins, int *pins, float *targetweights,
int *partvec, int *partweights, int *cut);
'''
cut_val = ctypes.c_int(patohData.cut)
cut_addr = ctypes.addressof(cut_val)
ok = self.PATOH_Part(ctypes.byref(patohData.params), patohData._c, patohData._n, patohData._nconst, patohData.useFixCells, patohData._cwghts.ctypes, patohData._nwghts.ctypes, patohData._xpins.ctypes, patohData._pins.ctypes, patohData._targetweights.ctypes, patohData._partvec.ctypes, patohData._partweights.ctypes, cut_addr)
if (ok == 0):
# get value back
            patohData.cut = cut_val.value
return True
return False
def free(self, patohData):
#self.cfree(patohData._cwghts.ctypes)
#self.cfree(patohData._nwghts.ctypes)
#self.cfree(patohData._xpins.ctypes)
#self.cfree(patohData._pins.ctypes)
#self.cfree(patohData._partweights.ctypes)
#self.cfree(patohData._partvec.ctypes)
ok = self.PATOH_Free()
if ok == 0:
return True
return False
|
krischer/prov
|
prov/tests/attributes.py
|
Python
|
mit
| 5,400
| 0.000926
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from prov.model import *
EX_NS = Namespace('ex', 'http://example.org/')
EX_OTHER_NS = Namespace('other', 'http://example.org/')
class TestAttributesBase(object):
"""This is the base class for testing support for vari
|
ous datatypes.
It is not runnable and needs to be included in a subclass of RoundTripTestCase.
"""
attribute_values = [
"un lieu",
Literal("un lieu", langtag='fr'),
Literal("a place", langtag='en'),
Literal(1, XSD_INT),
Literal(1, XSD_LONG),
Literal(1, XSD_SHORT),
Literal(2.0, XSD_DOUBLE),
Literal(1.0, XSD_FLOAT),
Literal(10, XSD_DECIMAL),
True,
False,
Literal(10, XSD_BYTE),
Literal(10, XSD_UNSIGNEDINT),
Literal(10, XSD_UNSIGNEDLONG),
Literal(10, XSD_INTEGER),
Literal(10, XSD_UNSIGNEDSHORT),
Literal(10, XSD_NONNEGATIVEINTEGER),
Literal(-10, XSD_NONPOSITIVEINTEGER),
Literal(10, XSD_POSITIVEINTEGER),
Literal(10, XSD_UNSIGNEDBYTE),
Identifier('http://example.org'),
Literal('http://example.org', XSD_ANYURI),
EX_NS['abc'],
EX_OTHER_NS['abcd'],
Namespace('ex', 'http://example4.org/')['zabc'],
Namespace('other', 'http://example4.org/')['zabcd'],
datetime.datetime.now(),
Literal(datetime.datetime.now().isoformat(), XSD_DATETIME)
]
def new_document(self):
return ProvDocument()
def run_entity_with_one_type_attribute(self, n):
document = self.new_document()
document.entity(EX_NS['et%d' % n], {'prov:type': self.attribute_values[n]})
self.assertRoundTripEquivalence(document)
def test_entity_with_one_type_attribute_0(self):
self.run_entity_with_one_type_attribute(0)
def test_entity_with_one_type_attribute_1(self):
self.run_entity_with_one_type_attribute(1)
def test_entity_with_one_type_attribute_2(self):
self.run_entity_with_one_type_attribute(2)
def test_entity_with_one_type_attribute_3(self):
self.run_entity_with_one_type_attribute(3)
def test_entity_with_one_type_attribute_4(self):
self.run_entity_with_one_type_attribute(4)
def test_entity_with_one_type_attribute_5(self):
self.run_entity_with_one_type_attribute(5)
def test_entity_with_one_type_attribute_6(self):
self.run_entity_with_one_type_attribute(6)
def test_entity_with_one_type_attribute_7(self):
self.run_entity_with_one_type_attribute(7)
def test_entity_with_one_type_attribute_8(self):
self.run_entity_with_one_type_attribute(8)
def test_entity_with_one_type_attribute_9(self):
self.run_entity_with_one_type_attribute(9)
def test_entity_with_one_type_attribute_10(self):
self.run_entity_with_one_type_attribute(10)
def test_entity_with_one_type_attribute_11(self):
self.run_entity_with_one_type_attribute(11)
def test_entity_with_one_type_attribute_12(self):
self.run_entity_with_one_type_attribute(12)
def test_entity_with_one_type_attribute_13(self):
self.run_entity_with_one_type_attribute(13)
def test_entity_with_one_type_attribute_14(self):
self.run_entity_with_one_type_attribute(14)
def test_entity_with_one_type_attribute_15(self):
self.run_entity_with_one_type_attribute(15)
def test_entity_with_one_type_attribute_16(self):
self.run_entity_with_one_type_attribute(16)
def test_entity_with_one_type_attribute_17(self):
self.run_entity_with_one_type_attribute(17)
def test_entity_with_one_type_attribute_18(self):
self.run_entity_with_one_type_attribute(18)
def test_entity_with_one_type_attribute_19(self):
self.run_entity_with_one_type_attribute(19)
def test_entity_with_one_type_attribute_20(self):
self.run_entity_with_one_type_attribute(20)
def test_entity_with_one_type_attribute_21(self):
self.run_entity_with_one_type_attribute(21)
def test_entity_with_one_type_attribute_22(self):
self.run_entity_with_one_type_attribute(22)
def test_entity_with_one_type_attribute_23(self):
self.run_entity_with_one_type_attribute(23)
def test_entity_with_one_type_attribute_24(self):
self.run_entity_with_one_type_attribute(24)
def test_entity_with_one_type_attribute_25(self):
self.run_entity_with_one_type_attribute(25)
def test_entity_with_one_type_attribute_26(self):
self.run_entity_with_one_type_attribute(26)
def test_entity_with_one_type_attribute_27(self):
self.run_entity_with_one_type_attribute(27)
def test_entity_with_multiple_attribute(self):
document = self.new_document()
attributes = [
(EX_NS['v_%d'% i], value) for i, value in enumerate(self.attribute_values)
]
document.entity(EX_NS['emov'], attributes)
self.assertRoundTripEquivalence(document)
def test_entity_with_multiple_value_attribute(self):
document = self.new_document()
attributes = [
('prov:value', value) for i, value in enumerate(self.attribute_values)
]
document.entity(EX_NS['emv'], attributes)
self.assertRoundTripEquivalence(document)
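# The 28 near-identical methods above give each datatype its own reported test
# case under plain unittest. An equivalent, programmatic sketch (an alternative
# pattern, not the library's actual approach):
def _add_type_attribute_tests(cls):
    for n in range(len(TestAttributesBase.attribute_values)):
        def _make(n):
            return lambda self: self.run_entity_with_one_type_attribute(n)
        setattr(cls, 'test_entity_with_one_type_attribute_%d' % n, _make(n))
    return cls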
|
portnov/sverchok
|
utils/sv_IO_monad_helpers.py
|
Python
|
gpl-3.0
| 3,948
| 0.002786
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import json
from sverchok.utils.logging import error
def pack_monad(node, node_items, groups_dict, create_dict_of_tree):
"""
we can not rely on .items() to be present for various reasons, so we must gather
something to fill .params with - due to dynamic nature of node.
"""
name = node.monad.name
node_items['all_props'] = node.monad.get_all_props()
node_items['monad'] = name
node_items['cls_dict'] = {}
node_items['cls_dict']['cls_bl_idname'] = node.bl_idname
for template in ['input_template', 'output_template']:
node_items['cls_dict'][template] = getattr(node, template)
if name not in groups_dict:
group_ng = bpy.data.node_groups[name]
group_dict = create_dict_of_tree(group_ng)
group_dict['bl_idname'] = group_ng.bl_idname
group_dict['cls_bl_idname'] = node.bl_idname
group_json = json.dumps(group_dict)
groups_dict[name] = group_json
# [['Y', 'StringsSocket', {'prop_name': 'y'}], [....
for idx, (socket_name, socket_type, prop_dict) in enumerate(node.input_template):
socket = node.inputs[idx]
if not socket.is_linked and prop_dict:
prop_name = prop_dict['prop_name']
v = getattr(node, prop_name)
if not isinstance(v, (float, int, str)):
v = v[:]
node_items[prop_name] = v
def unpack_monad(nodes, node_ref):
params = node_ref.get('params')
if params:
socket_prop_data = params.get('all_props')
monad_name = params.get('monad')
monad = bpy.data.node_groups[monad_name]
if socket_prop_data:
# including this to keep bw comp for trees that don't include this info.
monad.set_all_props(socket_prop_data)
        cls_ref = monad.update_cls()
node = nodes.new(cls_ref.bl_idname)
# -- addition 1 --------- setting correct properties on sockets.
cls_dict = params.get('cls_dict')
input_template = cls_dict['input_template']
for idx, (sock_name, sock_type, sock_props) in enumerate(input_template):
socket_reference = node.inputs[idx]
if sock_props:
for prop, val in sock_props.items():
setattr(socket_reference, prop, val)
# -- addition 2 --------- force push param values
# -- (this step is skipped by apply_core_props because this node has a cls_dict)
for prop_data in ('float_props', 'int_props'):
data_list = socket_prop_data.get(prop_data)
if not data_list:
continue
for k, v in data_list.items():
if hasattr(node, k):
if k in params:
setattr(node, k, params[k])
# else:
# print(k, 'not in', params)
#else:
# print('node name:', node, node.name, 'has no property called', k, 'yet..')
# node.output_template = cls_dict['output_template']
return node
else:
error('no parameters found! .json might be broken')
|
kumar303/zamboni
|
mkt/developers/tests/test_tasks.py
|
Python
|
bsd-3-clause
| 24,331
| 0
|
import codecs
import json
import os
import shutil
import socket
import subprocess
import tempfile
from contextlib import contextmanager
from cStringIO import StringIO
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
import mock
from nose.tools import eq_, ok_
from PIL import Image
from requests import RequestException
import mkt
import mkt.site.tests
from mkt.users.models import UserProfile
from mkt.developers import tasks
from mkt.files.models import FileUpload
from mkt.site.fixtures import fixture
from mkt.site.tests.test_utils_ import get_image_path
from mkt.site.utils import app_factory, ImageCheck
from mkt.submit.tests.test_views import BaseWebAppTest
from mkt.webapps.models import AddonExcludedRegion as AER
from mkt.webapps.models import Preview, Webapp
def test_resize_icon_shrink():
""" Image should be shrunk so that the longest side is 32px. """
resize_size = [32]
final_size = [(32, 12)]
_uploader(resize_size, final_size)
def test_resize_icon_enlarge():
""" Image stays the same, since the new size is bigger than both sides. """
resize_size = [1000]
final_size = [(339, 128)]
_uploader(resize_size, final_size)
def test_resize_icon_same():
""" Image stays the same, since the new size is the same. """
resize_size = [339]
final_size = [(339, 128)]
_uploader(resize_size, final_size)
def test_resize_icon_list():
""" Resize multiple images at once. """
resize_size = [32, 82, 100]
final_size = [(32, 12), (82, 30), (100, 37)]
_uploader(resize_size, final_size)
def _uploader(resize_size, final_size):
img = get_image_path('mozilla.png')
original_size = (339, 128)
for rsize, fsize in zip(resize_size, final_size):
dest_name = os.path.join(settings.ADDON_ICONS_PATH, '1234')
src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix='.png',
delete=False)
# resize_icon removes the original, copy it to a tempfile and use that.
shutil.copyfile(img, src.name)
# Sanity check.
with storage.open(src.name) as fp:
src_image = Image.open(fp)
src_image.load()
eq_(src_image.size, original_size)
val = tasks.resize_icon(src.name, dest_name, resize_size, locally=True)
eq_(val, {'icon_hash': 'bb362450'})
with storage.open('%s-%s.png' % (dest_name, rsize)) as fp:
dest_image = Image.open(fp)
dest_image.load()
# Assert that the width is always identical.
eq_(dest_image.size[0], fsize[0])
# Assert that the height can be a wee bit fuzzy.
assert -1 <= dest_image.size[1] - fsize[1] <= 1, (
'Got width %d, expected %d' % (
fsize[1], dest_image.size[1]))
if os.path.exists(dest_image.filename):
os.remove(dest_image.filename)
assert not os.path.exists(dest_image.filename)
assert not os.path.exists(src.name)
class TestPngcrushImage(mkt.site.tests.TestCase):
def setUp(self):
img = get_image_path('mozilla.png')
self.src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png",
delete=False)
shutil.copyfile(img, self.src.name)
patcher = mock.patch('subprocess.Popen')
self.mock_popen = patcher.start()
attrs = {
'returncode': 0,
'communicate.return_value': ('ouput', 'error')
}
self.mock_popen.return_value.configure_mock(**attrs)
self.addCleanup(patcher.stop)
def tearDown(self):
os.remove(self.src.name)
@mock.patch('shutil.move')
def test_pngcrush_image_is_called(self, mock_move):
name = self.src.name
expected_suffix = '.opti.png'
expected_cmd = ['pngcrush', '-q', '-rem', 'alla', '-brute', '-reduce',
'-e', expected_suffix, name]
rval = tasks.pngcrush_image(name)
self.mock_popen.assert_called_once_with(
expected_cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
mock_move.assert_called_once_with(
'%s%s' % (os.path.splitext(name)[0], expected_suffix), name)
eq_(rval, {'image_hash': 'bb362450'})
@mock.patch('mkt.webapps.models.Webapp.update')
@mock.patch('shutil.move')
def test_set_modified(self, mock_move, update_mock):
"""Test passed instance is updated with the hash."""
name = self.src.name
obj = app_factory()
ret = tasks.pngcrush_image(name, 'some_hash', set_modified_on=[obj])
ok_('some_hash' in ret)
eq_(update_mock.call_args_list[-1][1]['some_hash'], ret['some_hash'])
ok_('modified' in update_mock.call_args_list[-1][1])
class TestValidator(mkt.site.tests.TestCase):
def setUp(self):
self.upload = FileUpload.objects.create()
assert not self.upload.valid
def get_upload(self):
        return FileUpload.objects.get(pk=self.upload.pk)
@mock.patch('mkt.developers.tasks.run_validator')
def test_pass_validation(self, _mock):
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert self.get_upload().valid
@mock.patch('mkt.developers.tasks.run_validator')
    def test_fail_validation(self, _mock):
_mock.return_value = '{"errors": 2}'
tasks.validator(self.upload.pk)
assert not self.get_upload().valid
@mock.patch('mkt.developers.tasks.run_validator')
def test_validation_error(self, _mock):
_mock.side_effect = Exception
eq_(self.upload.task_error, None)
tasks.validator(self.upload.pk)
error = self.get_upload().task_error
assert error is not None
assert error.startswith('Traceback (most recent call last)'), error
@mock.patch('mkt.developers.tasks.validate_app')
@mock.patch('mkt.developers.tasks.storage.open')
def test_validate_manifest(self, _open, _mock):
_open.return_value = StringIO('')
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert _mock.called
@mock.patch('mkt.developers.tasks.validate_packaged_app')
@mock.patch('zipfile.is_zipfile')
def test_validate_packaged_app(self, _zipfile, _mock):
_zipfile.return_value = True
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert _mock.called
storage_open = storage.open
def _mock_hide_64px_icon(path, *args, **kwargs):
"""
A function that mocks `storage.open` and throws an IOError if you try to
open a 128x128px icon.
"""
if '128' in path:
raise IOError('No 128px icon for you!')
return storage_open(path, *args, **kwargs)
@override_settings(
PREVIEW_FULL_PATH='/tmp/uploads-tests/previews/full/%s/%d.%s',
PREVIEW_THUMBNAIL_PATH='/tmp/uploads-tests/previews/thumbs/%s/%d.png')
class TestResizePreview(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
# Make sure there are no leftover files in the test directory before
# launching tests that depend on the files presence/absence.
shutil.rmtree('/tmp/uploads-tests/previews/', ignore_errors=True)
def get_image(self, filename):
"""Copy image to tmp and return tmp path.
We do this because the task `resize_preview` removes the src file when
finished.
"""
src = get_image_path(filename)
dst = os.path.join(settings.TMP_PATH, 'preview', filename)
shutil.copy(src, dst)
return dst
def test_preview(self):
addon = Webapp.objects.get(pk=337141)
preview = Preview.objects.create(addon=addon)
src = self.get_image('preview.jpg')
tasks.resize_preview(src, preview.pk)
preview = preview.reload()
eq_(preview.image_size, [400, 533])
eq_(preview.thumbnail_size, [100, 133])
eq
|
54lihaoxin/GoogleFooBar
|
src/guard_game/test_suite.py
|
Python
|
apache-2.0
| 938
| 0.007463
|
import sys
import solution
# from classes import ?
class TestSuite:
def run(self):
self.test000()
self.test001()
        self.test002()
# self.test003()
# self.test004()
def test000(self):
print 'test 000\n'
n = 13
r = solution.answer(n)
print ' input:\t', n
print ' expect:\t', 4
print ' output:\t', r
print
def test001(self):
        print 'test 001\n'
n = 1235
r = solution.answer(n)
print ' input:\t', n
print ' expect:\t', 2
print ' output:\t', r
print
def test002(self):
print 'test 002\n'
n = 6471289
r = solution.answer(n)
print ' input:\t', n
print ' expect:\t', 1
print ' output:\t', r
print
def main(argv):
TestSuite().run()
if __name__ == '__main__':
main(sys.argv)
|
stregatto/fabric_lib
|
apt.py
|
Python
|
gpl-2.0
| 651
| 0.003072
|
from fabric.api import *
from fabric.utils import *
from fabric.contrib import *
class Apt(object):
def __init__(self):
return
def update(self):
cmd = 'sudo apt update'
run(cmd)
print(cmd)
def purge(self, package):
cmd = 'sudo apt purge -y %(package)s' % {'package': package}
# print(cmd)
run(cmd)
    def upgrade(self):
cmd = 'sudo apt upgrade -y'
run(cmd)
# print(cmd)
def install(self, package):
if package != None:
cmd = 'sudo apt -y install %(package)s' % {'package': package}
run(cmd)
# print(cmd)
|
mclumd/pelawak
|
pelawak/wsgi.py
|
Python
|
mit
| 482
| 0
|
"""
WSGI config for pelawak project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pelawak.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
bkabrda/anymarkup-core
|
test/test_serialize.py
|
Python
|
bsd-3-clause
| 3,156
| 0.000951
|
# -*- coding: utf-8 -*-
import io
import os
import pytest
import six
from anymarkup_core import *
from test import *
class TestSerialize(object):
"""Note: testing serialization is a bit tricky, since serializing dicts can result
in different order of values in serialized string in different runs.
That means that we can't just test whether the serialized string equals to expected
string. To solve this, we rather parse the serialized string back and make sure
that it equals the original structure.
"""
fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
def _read_decode(self, file):
if isinstance(file, six.string_types):
file = open(file, 'rb')
else:
file.seek(0)
return file.read().decode('utf-8')
@pytest.mark.parametrize(('struct', 'format'), [
(example_as_dict, 'ini'),
(example_as_dict, 'json'),
(example_as_dict, 'json5'),
(toml_example_as_dict, 'toml'),
(example_as_ordered_dict, 'xml'),
(example_as_dict, 'yaml'),
(example_as_ordered_dict, 'yaml'),
])
def test_serialize_basic(self, struct, format):
serialized = serialize(struct, format)
parsed_back = parse(serialized, format)
        assert parsed_back == struct
assert type(parsed_back) == type(struct)
def test_serialize_works_with_wb_opened_file(self, tmpdir):
f = os.path.join(str(tmpdir), 'foo.xml')
fhandle = open(f, 'wb+')
        serialize(example_as_ordered_dict, 'xml', fhandle)
assert self._read_decode(fhandle) == example_xml
def test_serialize_raises_with_unicode_opened_file(self, tmpdir):
# on Python 2, this can only be simulated with io.open
f = os.path.join(str(tmpdir), 'foo.json')
fhandle = io.open(f, 'w+', encoding='utf-8')
with pytest.raises(AnyMarkupError):
serialize(example_as_dict, 'json', fhandle)
@pytest.mark.parametrize(('struct', 'fmt', 'fname'), [
(example_as_dict, None, 'example.ini'),
(example_as_dict, None, 'example.json'),
(example_as_dict, 'json5', 'example.json5'),
(toml_example_as_dict, 'toml', 'example.toml'),
(example_as_ordered_dict, None, 'example.xml'),
(example_as_dict, None, 'example.yaml'),
(example_as_ordered_dict, None, 'example_ordered.yaml'),
])
def test_serialize_file_basic(self, struct, fmt, fname, tmpdir):
f = os.path.join(str(tmpdir), fname)
serialize_file(struct, f)
parsed_back = parse(self._read_decode(f), fmt)
assert parsed_back == struct
assert type(parsed_back) == type(struct)
def test_serialize_file_format_overrides_extension(self, tmpdir):
f = os.path.join(str(tmpdir), 'foo.ini')
serialize_file(example_as_dict, f, 'json')
assert parse(self._read_decode(f)) == example_as_dict
def test_parse_and_serialize_yaml_multiline_string(self):
# https://github.com/bkabrda/anymarkup-core/issues/1
inp = b'foo: |-\n line1\n line2\n line3\n'
assert serialize(parse(inp), 'yaml') == inp
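# Standalone illustration of the round-trip strategy from the class docstring
# (a sketch; key order in the serialized bytes may vary between runs, but
# serialize -> parse must reproduce the original structure):
def _round_trip_demo():
    original = {'a': 1, 'b': ['x', 'y']}
    assert parse(serialize(original, 'json'), 'json') == original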
|
CellProfiling/cam_acq
|
camacq/plugins/automations/__init__.py
|
Python
|
apache-2.0
| 10,741
| 0.000745
|
"""Handle automations."""
# Copyright 2013-2017 The Home Assistant Authors
# https://github.com/home-assistant/home-assistant/blob/master/LICENSE.md
# This file was modified by The Camacq Authors.
import logging
from collections import deque
from functools import partial
import voluptuous as vol
from camacq.exceptions import TemplateError
from camacq.helper import BASE_ACTION_SCHEMA, get_module, has_at_least_one_key
from camacq.helper.template import make_template, render_template
from camacq.const import CAMACQ_STOP_EVENT, CONF_DATA, CONF_ID
_LOGGER = logging.getLogger(__name__)
CONF_AUTOMATIONS = "automations"
CONF_ACTION = "action"
CONF_CONDITION = "condition"
CONF_CONDITIONS = "conditions"
CONF_NAME = "name"
CONF_TRIGGER = "trigger"
CONF_TYPE = "type"
ENABLED = "enabled"
NAME = "name"
ACTION_DELAY = "delay"
ACTION_TOGGLE = "toggle"
DATA_AUTOMATIONS = "automations"
TRIGGER_ACTION_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_TYPE): vol.Coerce(str),
vol.Required(CONF_ID): vol.Coerce(str),
vol.Optional(CONF_DATA, default={}): dict,
}
],
)
CONDITION_SCHEMA = vol.All(
has_at_least_one_key(CONF_TYPE, CONF_CONDITION),
{
# pylint: disable=no-value-for-parameter
vol.Inclusive(CONF_TYPE, "condition"): vol.All(
vol.Upper, vol.In(["AND", "OR"])
),
vol.Inclusive(CONF_CONDITIONS, "condition"): [
# pylint: disable=unnecessary-lambda
lambda value: CONDITION_SCHEMA(value)
],
vol.Exclusive(CONF_CONDITION, "condition"): vol.Coerce(str),
},
)
CONFIG_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_NAME): vol.Coerce(str),
vol.Required(CONF_TRIGGER): TRIGGER_ACTION_SCHEMA,
vol.Required(CONF_ACTION): TRIGGER_ACTION_SCHEMA,
vol.Optional(
CONF_CONDITION, default={CONF_CONDITION: "true"}
): CONDITION_SCHEMA,
}
]
)
async def setup_module(center, config):
"""Set up automations package.
Parameters
----------
center : Center instance
The Center instance.
config : dict
The config dict.
"""
_process_automations(center, config)
automations = center.data[DATA_AUTOMATIONS]
async def handle_action(**kwargs):
"""Enable or disable an automation."""
name = kwargs[NAME]
automation = automations[name]
enabled = kwargs.get(ENABLED, not automation.enabled)
if enabled:
automation.enable()
else:
automation.disable()
toggle_action_schema = BASE_ACTION_SCHEMA.extend(
{
vol.Required(NAME): vol.All(vol.Coerce(str), vol.In(automations)),
ENABLED: vol.Boolean(), # pylint: disable=no-value-for-parameter
}
)
# register action to enable/disable automation
center.actions.register(
"automations", ACTION_TOGGLE, handle_action, toggle_action_schema
)
def _process_automations(center, config):
"""Process automations from config."""
automations = center.data.setdefault(DATA_AUTOMATIONS, {})
conf = config[CONF_AUTOMATIONS]
for block in conf:
name = block[CONF_NAME]
_LOGGER.debug("Setting up automation %s", name)
action_sequence = _get_actions(center, block[CONF_ACTION])
cond_func = _process_condition(center, block[CONF_CONDITION])
# use partial to get a function with args to call later
attach_triggers = partial(_process_trigger, center, block[CONF_TRIGGER])
automations[name] = Automation(
center, name, attach_triggers, cond_func, action_sequence
)
def _get_actions(center, config_block):
"""Return actions."""
actions = (TemplateAction(center, action_conf) for action_conf in config_block)
return ActionSequence(center, actions)
def _process_condition(center, config_block):
"""Return a function that parses the condition."""
if CONF_TYPE in config_block:
checks = []
condition_type = config_block[CONF_TYPE]
conditions = config_block[CONF_CONDITIONS]
for cond in conditions:
check = _process_condition(center, cond)
checks.append(check)
return make_checker(condition_type, checks)
data = config_block[CONF_CONDITION]
template = make_template(center, data)
return partial(render_template, template)
def make_checker(condition_type, checks):
"""Return a function to check condition."""
def check_condition(variables):
"""Return True if all or any condition(s) pass."""
if condition_type.lower() == "and":
return all(template_check(check(variables)) for check in checks)
if condition_type.lower() == "or":
return any(template_check(check(variables)) for check in checks)
return False
return check_condition
def template_check(value):
"""Check if a rendered template string equals true.
If value is not a string, return value as is.
"""
if isinstance(value, str):
return value.lower() == "true"
return value
def _process_trigger(center, config_block, trigger):
"""Process triggers for an automation."""
remove_funcs = []
for conf in config_block:
trigger_id = conf[CONF_ID]
trigger_type = conf[CONF_TYPE]
trigger_mod = get_module(__name__, trigger_type)
if not trigger_mod:
continue
_LOGGER.debug("Setting up trigger %s", trigger_id)
remove = trigger_mod.handle_trigger(center, conf, trigger)
if not remove:
_LOGGER.error("Setting up trigger %s failed", trigger_id)
continue
remove_funcs.append(remove)
if not remove_funcs:
return None
def remove_triggers():
"""Remove attached triggers."""
for remove in remove_funcs:
remove()
return remove_triggers
class Automation:
"""Automation class."""
# pylint: disable=too-many-arguments
def __init__(
self, center, name, attach_triggers, cond_func, action_sequence, enabled=True
):
"""Set up instance."""
self._center = center
self.name = name
        self.enabled = False
self._action_sequence = action_sequence
self._attach_triggers = attach_triggers
self._detach_triggers = None
self._cond_func = cond_func
if enabled:
self.enable()
def __repr__(self):
"""Return the representation."""
return (
f"Automation(center={self._center}, name={self.name}, "
f"attach_triggers={self._attach_triggers}, cond_func={self._cond_func}, "
f"action_sequence={self._action_seque
|
nce}, enabled={self.enabled})"
)
def enable(self):
"""Enable automation."""
if self.enabled:
return
self._detach_triggers = self._attach_triggers(self.trigger)
self.enabled = True
def disable(self):
"""Disable automation."""
if not self.enabled:
return
if self._detach_triggers is not None:
self._detach_triggers()
self._detach_triggers = None
self.enabled = False
async def trigger(self, variables):
"""Run actions of this automation."""
variables["samples"] = self._center.samples
_LOGGER.debug("Triggered automation %s", self.name)
try:
cond = self._cond_func(variables)
except TemplateError as exc:
_LOGGER.error("Failed to render condition for %s: %s", self.name, exc)
return
if cond:
_LOGGER.debug("Condition passed for %s", self.name)
await self._action_sequence(variables)
class ActionSequence:
"""Represent a sequence of actions."""
# pylint: disable=too-few-public-methods
def __init__(self, center, actions):
"""Set up instance."""
self._center = center
self.actions = list(actions) # copy to list to make sure it's a list
async def __call__(self, variables):
"""Start action sequence."""
|
teamtachyon/Quillpad-Server
|
QuillTrainer.py
|
Python
|
bsd-3-clause
| 4,979
| 0.022896
|
# -*- coding: utf-8 -*-
# @Date : Jul 13, 2016
# @Author : Ram Prakash, Sharath Puranik
# @Version : 1
import CART
from QuillLanguage import QuillLanguage
import pickle
class QuillTrainer(object):
def __init__(self,quillLang):
if isinstance(quillLang,QuillLanguage):
self.language = quillLang
else:
raise Exception,'Invalid parameter. Not of type QuillLanguage'
def train(self,uWords,scope=4,splRulesFlag=True):
self.language.setKnowledge(self.__buildKeyToCARTMap(uWords,scope,splRulesFlag,"primary"),"primary")
self.language.setKnowledge(self.__buildKeyToCARTMap(uWords,scope,splRulesFlag,"predictive"),"predictive")
return self.language
def getLanguage(self):
return self.language
def store(self,fname=None):
if fname == None:
fname = self.language.language+'.qil'
keyToCARTMap = self.language.keyToCARTMap
keyToCARTMapPrimary = self.language.keyToCARTMapPrimary
f = file(fname,'w')
f.write('<QuillLanguage lang="%s" script="%s" deffont="%s" epsilon="%s">\n'%(self.language.language,self.language.script,self.language.default_font,self.language.epsilon.encode('utf-8')))
for (key,keyCart) in keyToCARTMap.items():
keyCart.storeCart(f,"predictive")
for (key,keyCart) in keyToCARTMapPrimary.items():
keyCart.storeCart(f,"primary")
f.write('</QuillLanguage>')
f.close()
def load(self, trainedData):
pass
def __buildKeyToCARTMap ( self, uWords,scope=4,splRulesFlag=True,type="predictive" ):
contextLen = scope
splRules = []
if splRulesFlag == True:
splRules = self.language.getSpecialRules(type)
keyToCARTMap = {}
data={}
for uWord in uWords:
try:
trainPairs = self.language.getTrainingPairs(uWord,type)
except KeyError:
trainPairs = None
if trainPairs != None:
data1 = CART.CART.prepareTrainingData(trainPairs,contextLen,1)
for key in data1.keys():
if data.has_key(key):
data[key].extend( data1[key] )
else:
data.update({key:data1[key]})
if type == "primary":
            contextPrefOrder = [0,1,2,-1,3,-2,4,-3,-4]
elif type == "predictive":
contextPrefOrder = None
for key in data.keys():
keyCart = CART.CART(key,data[key],contextLen, splRules,contextPrefOrder)
keyCart.build()
keyToCARTMap.update( {key:keyCart } )
return keyToCARTMap
def createTrainingData( self, uWords,scope=4,splRulesType='predictive',fname = None ):
contextLen = scope
splRules = []
if splRulesType != None:
splRules = self.language.getSpecialRules(splRulesType)
if fname == None:
            fname = self.language.language+'.data'
f = file(fname,'w')
f.write('<QuillTrainData lang="%s" script="%s" deffont="%s" epsilon="%s" context-len="%s">\n'%(self.language.language,self.language.script,self.language.default_font,self.language.epsilon.encode('utf-8'),scope))
f.write('\t<SpecialRules>\n')
for eachRule in splRules:
f.write('\t\t<SpecialRule>')
f.write(repr(eachRule))
f.write('</SpecialRule>')
f.write('\n')
f.write('\t\t</SpecialRules>\n')
keyToCARTMap = {}
data={}
for uWord in uWords:
try:
trainPairs = self.language.getTrainingPairs(uWord)
except KeyError:
trainPairs = None
if trainPairs != None:
data1 = CART.CART.prepareTrainingData(trainPairs,contextLen,1)
for key in data1.keys():
if data.has_key(key):
data[key].extend( data1[key] )
else:
data.update({key:data1[key]})
for key in data.keys():
keyData = data[key];
f.write('\t<QuillWordList key="%s">\n'%key)
for cWord in keyData:
f.write('\t\t<QuillWord>\n')
f.write('\t\t\t<Literal>%s</Literal>\n'%cWord.word)
f.write('\t\t\t<Focus>%s</Focus>\n'%cWord.focus)
f.write('\t\t\t<ClassAssign>%s</ClassAssign>\n'%cWord.classID.encode('utf-8'))
f.write('\t\t\t<Count>%s</Count>\n'%cWord.count)
f.write('\t\t</QuillWord>\n')
f.write('\t</QuillWordList>\n')
f.write('</QuillTrainData>\n')
f.close()
|
jtriley/s3site
|
s3site/static.py
|
Python
|
gpl-3.0
| 2,179
| 0
|
"""
Module for storing static data structures
"""
import os
import sys
VERSION = 0.9999
PID = os.getpid()
S3SITE_CFG_DIR = os.path.join(os.path.expanduser('~'), '.s3site')
S3SITE_CFG_FILE = os.path.join(S3SITE_CFG_DIR, 'config')
S3SITE_LOG_DIR = os.path.join(S3SITE_CFG_DIR, 'logs')
S3SITE_META_FILE = '__s3site.cfg'
DEBUG_FILE = os.path.join(S3SITE_LOG_DIR, 'debug.log')
AWS_DEBUG_FILE = os.path.join(S3SITE_LOG_DIR, 'aws-debug.log')
CRASH_FILE = os.path.join(S3SITE_LOG_DIR, 'crash-report-%d.txt' % PID)
GLOBAL_SETTINGS = {
# setting, type, required?, default, options, callback
'enable_experimental': (bool, False, False, None, None),
'web_browser': (str, False, None, None, None),
'include': (list, False, [], None, None),
}
AWS_SETTINGS = {
'aws_access_key_id': (str, True, None, None, None),
'aws_secret_access_key': (str, True, None, None, None),
'aws_user_id': (str, False, None, None, None),
'aws_port': (int, False, None, None, None),
'aws_ec2_path': (str, False, '/', None, None),
'aws_s3_path': (str, False, '/', None, None),
'aws_is_secure': (bool, False, True, None, None),
'aws_region_name': (str, False, None, None, None),
'aws_region_host': (str, False, None, None, None),
'aws_s3_host': (str, False, None, None, None),
'aws_proxy': (str, False, None, None, None),
'aws_proxy_port': (int, False, None, None, None),
'aws_proxy_user': (str, False, None, None, None),
'aws_proxy_pass': (str, False, None, None, None),
}
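# Illustrative sketch of consuming a settings spec tuple of
# (type, required, default, options, callback); the semantics are assumed
# from the comment above, not taken from the real s3site config loader:
#
#     def check_setting(name, value, spec):
#         stype, required, default, options, callback = spec
#         if value is None:
#             if required:
#                 raise ValueError("missing required setting: %s" % name)
#             return default
#         if not isinstance(value, stype):
#             raise TypeError("%s must be of type %s" % (name, stype.__name__))
#         if options and value not in options:
#             raise ValueError("%s must be one of %r" % (name, options))
#         return callback(value) if callback else value
#
#     check_setting('aws_ec2_path', None, AWS_SETTINGS['aws_ec2_path'])  # -> '/'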
def __expand_all(path):
path = os.path.expanduser(path)
path = os.path.expandvars(path)
return path
def __makedirs(path, exit_on_failure=False):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
            if exit_on_failure:
sys.stderr.write("!!! ERROR - %s *must* be a directory\n" %
path)
elif not os.path.isdir(path) and exit_on_failure:
sys.stderr.write("!!! ERROR - %s *must* be a directory\n" % path)
sys.exit(1)
def create_config_dirs():
__makedirs(S3SITE_CFG_DIR, exit_on_failure=True)
__makedirs(S3SITE_LOG_DIR)
|
jchiang87/TimeBombs
|
display.py
|
Python
|
gpl-3.0
| 548
| 0.021898
|
from pylab import *
data = loadtxt('Data/dummy_data.dat')
posterior_sample = atleast_2d(loadtxt('posterior_sample.txt'))
ion()
for i in xrange(0, posterior_sample.shape[0]):
hold(False)
plot(data[:,0], data[:,1], 'bo')
hold(True)
    plot(data[:,0], posterior_sample[i, -data.shape[0]:], 'r-')
ylim([0, 1.1*data[:,1].max()])
draw()
ioff()
show()
hist(posterior_sample[:,9], 20)
xlabel('Number of Bursts')
show()
pos = posterior_sample[:, 10:110]
pos = pos[pos != 0.]
hist(pos, 1000)
xlabel('Time')
title('Positions of Bursts')
show()
|
karlnapf/kameleon-mcmc
|
kameleon_mcmc/tools/Visualise.py
|
Python
|
bsd-2-clause
| 5,656
| 0.006895
|
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2013 Heiko Strathmann
Written (W) 2013 Dino Sejdinovic
"""
from kameleon_mcmc.distribution.Gaussian import Gaussian
from matplotlib.patches import Ellipse
from matplotlib.pyplot import imshow, ylim, xlim, contour, plot, hold, gca
from numpy import linspace
from numpy.linalg.linalg import eigh
from numpy import zeros, array, exp, arctan2, sqrt
import numpy
class Visualise(object):
def __init__(self):
pass
@staticmethod
def get_plotting_arrays(distribution):
bounds = distribution.get_plotting_bounds()
assert(len(bounds) == 2)
Xs = linspace(bounds[0][0], bounds[0][1])
Ys = linspace(bounds[1][0], bounds[1][1])
return Xs, Ys
@staticmethod
def visualise_distribution(distribution, Z=None, log_density=False, Xs=None, Ys=None):
"""
Plots the density of a given Distribution instance and plots some
samples on top.
"""
if Xs is None or Ys is None:
Xs, Ys = Visualise.get_plotting_arrays(distribution)
Visualise.plot_density(distribution, Xs, Ys)
if Z is not None:
hold(True)
Visualise.plot_data(Z)
hold(False)
@staticmethod
def plot_density(distribution, Xs, Ys, log_domain=False):
"""
Plots a 2D density
        density - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
assert(distribution.dimension == 2)
D = zeros((len(Xs), len(Ys)))
        # compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
        ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def contour_plot_density(distribution, Xs=None, Ys=None, log_domain=False):
"""
Contour-plots a 2D density. If Gaussian, plots 1.96 interval contour only
density - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
if isinstance(distribution, Gaussian) and log_domain == False:
gca().add_artist(Visualise.get_gaussian_ellipse_artist(distribution))
gca().plot(distribution.mu[0], distribution.mu[1], 'r*', \
markersize=3.0, markeredgewidth=.1)
return
assert(distribution.dimension == 2)
if Xs is None:
(xmin, xmax), _ = distribution.get_plotting_bounds()
Xs = linspace(xmin, xmax)
if Ys is None:
_, (ymin, ymax) = distribution.get_plotting_bounds()
Ys = linspace(ymin, ymax)
D = zeros((len(Ys), len(Xs)))
# compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
contour(Xs, Ys, D, origin='lower')
@staticmethod
def plot_array(Xs, Ys, D):
"""
Plots a 2D array
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
D - array to plot
"""
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def plot_data(Z, y=None):
"""
Plots collection of 2D points and optionally adds a marker to one of them
        Z - set of row-vector points to plot
y - one point that is marked in red, might be None
"""
plot(Z[:, 0], Z[:, 1], '*', markersize=3.0, markeredgewidth=.1)
if y is not None:
plot(y[0, 0], y[0, 1], 'r*', markersize=10.0, markeredgewidth=.1)
@staticmethod
def get_gaussian_ellipse_artist(gaussian, nstd=1.96, linewidth=1):
"""
        Returns an ellipse artist for nstd times the standard deviation of this
Gaussian
"""
assert(isinstance(gaussian, Gaussian))
assert(gaussian.dimension == 2)
# compute eigenvalues (ordered)
vals, vecs = eigh(gaussian.L.dot(gaussian.L.T))
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:, order]
theta = numpy.degrees(arctan2(*vecs[:, 0][::-1]))
# width and height are "full" widths, not radius
width, height = 2 * nstd * sqrt(vals)
e = Ellipse(xy=gaussian.mu, width=width, height=height, angle=theta, \
edgecolor="red", fill=False, linewidth=linewidth)
return e
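# Illustrative usage (assumed constructor and sampling API of the surrounding
# kameleon_mcmc package):
#
#     from numpy import eye, zeros
#     gaussian = Gaussian(mu=zeros(2), Sigma=eye(2))
#     Z = gaussian.sample(200).samples
#     Visualise.visualise_distribution(gaussian, Z)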
|
Seraf/LISA
|
lisa/server/tests/test_plugins.py
|
Python
|
mit
| 2,790
| 0.002867
|
import os
from twisted.trial import unittest
from lisa.server.plugins.PluginManager import PluginManagerSingleton
class LisaPluginTestCase(unittest.TestCase):
def setUp(self):
        self.pluginManager = PluginManagerSingleton.get()
def test_a_install_plugin_ok(self):
answer = self.pluginManager.installPlugin(plugin_name="UnitTest", test_mode=True, version='0.1.6')
self.assertEqual(answer['status'], "success")
def test_aa_install_plugin_fail(self):
answer = self.pluginManager.installPlugin(plugin_name="UnitTest", test_mode=True)
        self.assertEqual(answer['status'], "fail")
def test_b_disable_plugin_ok(self):
answer = self.pluginManager.disablePlugin(plugin_name="UnitTest")
self.assertEqual(answer['status'], "success")
def test_bb_disable_plugin_fail(self):
answer = self.pluginManager.disablePlugin(plugin_name="UnitTest")
self.assertEqual(answer['status'], "fail")
def test_c_enable_plugin_ok(self):
answer = self.pluginManager.enablePlugin(plugin_name="UnitTest")
self.assertEqual(answer['status'], "success")
def test_cc_enable_plugin_fail(self):
answer = self.pluginManager.enablePlugin(plugin_name="UnitTest")
self.assertEqual(answer['status'], "fail")
def test_d_upgrade_plugin_ok(self):
answer = self.pluginManager.upgradePlugin(plugin_name="UnitTest", test_mode=True)
self.assertEqual(answer['status'], "success")
def test_dd_upgrade_plugin_fail(self):
answer = self.pluginManager.upgradePlugin(plugin_name="UnitTest", test_mode=True)
self.assertEqual(answer['status'], "fail")
def test_e_load_plugin(self):
answer = self.pluginManager.loadPlugins()
test_list = ['UnitTest']
self.assertListEqual(answer, test_list)
def test_f_methodList_plugin(self):
answer = self.pluginManager.methodListPlugin()
methodlist = [{'methods': ['test'], 'plugin': u'UnitTest'}, {'core': 'intents', 'methods': ['list']}]
self.assertListEqual(answer, methodlist)
def test_g_create_plugin(self):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lisa.server.web.weblisa.settings")
answer = self.pluginManager.createPlugin(plugin_name="TestPlugin", author_name="TestAuthor",
author_email="test@test.com")
self.assertEqual(answer['status'], "success")
def test_h_uninstall_plugin(self):
answer = self.pluginManager.uninstallPlugin(plugin_name="UnitTest")
self.assertEqual(answer['status'], "success")
def test_hh_uninstall_plugin(self):
answer = self.pluginManager.uninstallPlugin(plugin_name="UnitTest")
self.assertEqual(answer['status'], "fail")
|
lo-windigo/fragdev
|
wiblog/migrations/0004_auto_20170703_1156.py
|
Python
|
agpl-3.0
| 1,015
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-03 18:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('wiblog', '0003_auto_20160325_1441'),
]
operations = [
migrations.AlterModelManagers(
name='comment',
managers=[
                ('approved', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='post',
managers=[
('published', django.db.models.manager.Manager()),
],
),
        migrations.AlterField(
model_name='comment',
name='url',
field=models.URLField(blank=True, null=True),
),
migrations.AlterField(
model_name='tag',
name='desc',
field=models.SlugField(unique=True, verbose_name='Tag'),
),
]
|
h-mayorquin/time_series_basic
|
examples/auto_correlations_compare.py
|
Python
|
bsd-3-clause
| 1,897
| 0
|
"""
This script compares the autocorrelation in statsmodels with
the one that you can build using only correlate.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
import statsmodels.api as sm
from signals.time_series_class import MixAr, AR
from signals.aux_functions import sidekick
plot = False
plot2 = True
# Time parameters
dt = 0.1
Tmax = 100
# Let's get the auxiliary signal
amplitude = 1
w1 = 1
w2 = 5
beta = sidekick(w1, w2, dt, Tmax, amplitude)
# First we need the phi's vector
phi0 = 0.0
phi1 = -0.8
phi2 = 0.3
phi = np.array((phi0, phi1, phi2))
# Now we need the initial conditions
x0 = 1
x1 = 1
x2 = 0
initial_conditions = np.array((x0, x1, x2))
# First we construct the series without the sidekick
B = AR(phi, dt=dt, Tmax=Tmax)
B.initial_conditions(initial_conditions)
normal_series = B.construct_series()
# Second we construct the series with the mix
A = MixAr(phi, dt=dt, Tmax=Tmax, beta=beta)
A.initial_conditions(initial_conditions)
mix_series = A.construct_series()
time = A.time
if plot:
plt.subplot(3, 1, 1)
plt.plot(time, beta)
plt.subplot(3, 1, 2)
plt.plot(time, normal_series)
plt.subplot(3, 1, 3)
    plt.plot(time, mix_series)
plt.show()
# Let's calculate the auto correlation
nlags = 40
normal_series -= normal_series.mean()
var = np.var(normal_series)
n = len(normal_series)
nlags1 = nlags
normalizing = np.arange(n, n - nlags1, -1)
auto_correlation1 = np.correlate(normal_series, normal_series, mode='full')
aux = auto_correlation1.size/2
auto_correlation1 = auto_correlation1[aux:aux + nlags1] / (normalizing * var)
auto_correlation2 = sm.tsa.stattools.acf(normal_series, nlags=nlags)
print 'result', np.sum(auto_correlation1 - auto_correlation2)
if plot2:
plt.subplot(2, 1, 1)
plt.plot(auto_correlation1)
plt.subplot(2, 1, 2)
plt.plot(auto_correlation2)
plt.show()
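# Illustrative cross-check (assumed reading of the code above): the lag-k
# entry of the manual estimate is the mean-centred product sum over n - k
# terms,
#
#     k = 5
#     manual_k = np.dot(normal_series[:-k], normal_series[k:]) / ((n - k) * var)
#     # manual_k should match auto_correlation1[k] up to floating point error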
|
Answeror/torabot
|
torabot/tasks/delete.py
|
Python
|
mit
| 1,277
| 0.003132
|
from logbook import Logger
from ..core.local import get_current_conf
from ..core.connection import autoccontext
from .. import db
from datetime import timedelta, datetime
log = Logger(__name__)
def del_inactive_queries():
conf = get_current_conf()
with autoccontext(commit=True) as conn:
before = db.get_query_count(conn)
db.del_inactive_queries(
conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_INACTIVE_QUERIES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_INACTIVE_QUERIES_LIMIT']
)
after = db.get_query_count(conn)
log.info('delete inactive queries, from {} to {}, deleted {}', before, after, before - after)
return before - after
def del_old_changes():
conf = get_current_conf()
with autoccontext(commit=True) as conn:
        before = db.get_change_count(conn)
db.del_old_changes(
conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_OLD_CHANGES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_OLD_CHANGES_LIMIT']
)
after = db.get_change_count(conn)
    log.info('delete old changes, from {} to {}, deleted {}', before, after, before - after)
return before - after
|
nacc/autotest
|
client/tests/aio_dio_bugs/aio_dio_bugs.py
|
Python
|
gpl-2.0
| 1,337
| 0.005236
|
import os
from autotest.client import test, utils
# tests is a simple array of "cmd" "arguments"
tests = [["aio-dio-invalidate-failure", "poo"],
["aio-dio-subblock-eof-read", "eoftest"],
["aio-free-ring-with-bogus-nr-pages", ""],
["aio-io-setup-with-nonwritable-context-pointer", ""],
["aio-dio-extend-stat", "file"],
]
name = 0
arglist = 1
class aio_dio_bugs(test.test):
version = 5
preserve_srcdir = True
def initialize(self):
self.job.require_gcc()
self.job.setup_dep(['libaio'])
        ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
cflags = '-I ' + self.autodir + '/deps/libaio/include'
self.gcc_flags = ldflags + ' ' + cflags
def setup(self):
        os.chdir(self.srcdir)
utils.make('"CFLAGS=' + self.gcc_flags + '"')
def execute(self, args = ''):
os.chdir(self.tmpdir)
libs = self.autodir + '/deps/libaio/lib/'
ld_path = utils.prepend_path(libs,
utils.environ('LD_LIBRARY_PATH'))
var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
for test in tests:
cmd = self.srcdir + '/' + test[name] + ' ' + args + ' ' \
+ test[arglist]
utils.system(var_ld_path + ' ' + cmd)
|
alexforencich/xfcp
|
lib/eth/lib/axis/tb/axis_frame_length_adjust_fifo/test_axis_frame_length_adjust_fifo.py
|
Python
|
mit
| 9,383
| 0.001172
|
#!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.axi import AxiStreamBus, AxiStreamFrame, AxiStreamSource, AxiStreamSink
from cocotbext.axi.stream import define_stream
StatusBus, StatusTransaction, StatusSource, StatusSink, StatusMonitor = define_stream("Status",
signals=["frame_pad", "frame_truncate", "frame_length", "frame_original_length", "valid"],
optional_signals=["ready"]
)
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.fork(Clock(dut.clk, 10, units="ns").start())
self.source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "s_axis"), dut.clk, dut.rst)
self.sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "m_axis"), dut.clk, dut.rst)
# Status
self.status_sink = StatusSink(StatusBus.from_prefix(dut, "status"), dut.clk, dut.rst)
self.dut.length_min.setimmediatevalue(0)
self.dut.length_max.setimmediatevalue(2048)
def set_idle_generator(self, generator=None):
if generator:
self.source.set_pause_generator(generator())
def set_backpressure_generator(self, generator=None):
if generator:
self.sink.set_pause_generator(generator())
async def reset(self):
self.dut.rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst <= 1
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst <= 0
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
async def run_test(dut, payload_lengths=None, payload_data=None, idle_inserter=None, backpressure_inserter=None):
tb = TB(dut)
data_width = len(tb.source.bus.tkeep)
byte_width = data_width // 8
id_count = 2**len(tb.source.bus.tid)
cur_id = 1
await tb.reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
for length_max in range(1, 4):
for length_min in range(0, length_max+1):
tb.log.info("length_min %d, length_max %d", length_min, length_max)
await RisingEdge(dut.clk)
tb.dut.length_min <= length_min
tb.dut.length_max <= length_max
await RisingEdge(dut.clk)
test_frames = []
for test_data in [payload_data(x) for x in payload_lengths()]:
test_frame = AxiStreamFrame(test_data, tid=cur_id, tdest=cur_id)
test_frames.append(test_frame)
await tb.source.send(test_frame)
cur_id = (cur_id + 1) % id_count
for test_frame in test_frames:
rx_frame = await tb.sink.recv()
len_rx = len(rx_frame.tdata)
len_test = len(test_frame.tdata)
len_min = min(len_rx, len_test)
assert len_rx >= length_min
assert len_rx <= length_max
assert rx_frame.tdata[:len_min] == test_frame.tdata[:len_min]
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == test_frame.tdest
assert not rx_frame.tuser
status = await tb.status_sink.recv()
tb.log.info("Status: %s", status)
assert status.frame_pad == int(len_test < length_min)
assert status.frame_truncate == int(len_test > length_max)
assert status.frame_length == len_rx
                assert status.frame_original_length == len_test
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_tuser_assert(dut):
tb = TB(dut)
await tb.reset()
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 32))
    test_frame = AxiStreamFrame(test_data, tuser=1)
await tb.source.send(test_frame)
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_init_sink_pause(dut):
tb = TB(dut)
await tb.reset()
tb.sink.pause = True
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 32))
test_frame = AxiStreamFrame(test_data)
await tb.source.send(test_frame)
for k in range(64):
await RisingEdge(dut.clk)
tb.sink.pause = False
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_data
assert not rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_init_sink_pause_reset(dut):
tb = TB(dut)
await tb.reset()
tb.sink.pause = True
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 32))
test_frame = AxiStreamFrame(test_data)
await tb.source.send(test_frame)
for k in range(64):
await RisingEdge(dut.clk)
await tb.reset()
tb.sink.pause = False
for k in range(64):
await RisingEdge(dut.clk)
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_overflow(dut):
tb = TB(dut)
await tb.reset()
tb.sink.pause = True
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 2048))
test_frame = AxiStreamFrame(test_data)
await tb.source.send(test_frame)
for k in range(2048):
await RisingEdge(dut.clk)
tb.sink.pause = False
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_data
assert not rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
def cycle_pause():
return itertools.cycle([1, 1, 1, 0])
def size_list():
data_width = len(cocotb.top.m_axis_tdata)
byte_width = data_width // 8
return list(range(1, byte_width*4+1))+[512]+[1]*64
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
if cocotb.SIM_NAME:
factory = TestFactory(run_test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.generate_tests()
for test in [run_test_tuser_assert, run_test_init_sink_pause, run_test_init_sink_pause_reset, run_test_overflow]:
factory = TestFactory(test)
factory.generate_tests()
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("data_width", [8, 16, 32])
def test_axis_frame_length_adjust_fifo(request, data_width):
dut = "axis_frame_length_adjust_fifo"
|
miguelpedroso/neuralmind
|
examples/cifar10_mlp2.py
|
Python
|
mit
| 1,690
| 0.049112
|
import sys
sys.path.append("../")
sys.path.append("../neuralmind")
import gzip
import cPickle
import numpy as np
import theano
import theano.tensor as T
from neuralmind import NeuralNetwork
from layers import HiddenLayer
from layers import DropoutLayer
import activations
from trainers import SGDTrainer
from trainers import ExponentialDecay
import datasets
# Load MNIST
#datasets = datasets.load_cifar10("/home/miguel/deeplearning/datasets")
datasets = datasets.load_cifar10("/home/ubuntu/deeplearning/datasets")
"""
model = NeuralNetwork(
n_inputs=32*32*3,
layers = [
(HiddenLayer,
{
'n_units': 512,
			'non_linearity': activations.rectify
}),
(HiddenLayer,
{
'n_units': 512,
'non_linearity': activations.rectify
}),
(HiddenLayer,
{
'n_units': 10,
'non_linearity': activations.softmax
})
],
trainer=(SGDTrainer,
{
'batch_size': 20,
'learning_rate': 0.1,
'n_epochs': 400,
'global_L2_regularization': 0.0001,
'dynamic_learning_rate': (ExponentialDecay, {'decay': 0.99}),
}
)
)
"""
model = NeuralNetwork(
n_inputs=32*32*3,
layers = [
(HiddenLayer,
{
'n_units': 1024,
'non_linearity': activations.rectify
}),
(DropoutLayer, {'probability': 0.5}),
(HiddenLayer,
{
'n_units': 1024,
'non_linearity': activations.rectify
}),
(DropoutLayer, {'probability': 0.5}),
(HiddenLayer,
{
'n_units': 10,
'non_linearity': activations.softmax
})
],
trainer=(SGDTrainer,
{
'batch_size': 512,
'learning_rate': 0.1,
'n_epochs': 400,
#'global_L2_regularization': 0.0001,
'dynamic_learning_rate': (ExponentialDecay, {'decay': 0.99}),
}
)
)
model.train(datasets[0], datasets[1])
|
atizo/djangojames
|
djangojames/forms/fields.py
|
Python
|
gpl-2.0
| 2,436
| 0.006568
|
# -*- coding: utf-8 -*-
#
# Atizo - The Open Innovation Platform
# http://www.atizo.com/
#
# Copyright (c) 2008-2010 Atizo AG. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from django.forms.util import flatatt
from django.forms.widgets import Select
from django.utils.safestring import mark_safe
from django import forms
from widgets import Html5DateTimeInput
class InputLabelWidget(Select):
def render(self, name, value, attrs=None, choices=()):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
label = final_attrs.get('label','')
if label:
del final_attrs['label']
output = [u'<select%s>' % flatatt(final_attrs)]
if label:
output.append(self.render_option([], '', '- %s -' % label))
options = self.render_options(choices, [value])
if options:
output.append(options)
output.append(u'</select>')
return mark_safe(u'\n'.join(output))
class LabelCharField(forms.CharField):
widget = InputLabelWidget
def __init__(self, *args, **kwargs):
super(LabelCharField, self).__init__(*args, **kwargs)
self.label = kwargs.get('label', '')
def widget_attrs(self, widget):
if self.label:
return {'label': u'%s' % self.label}
return {}
class LabelIntegerField(forms.IntegerField):
widget = InputLabelWidget
def __init__(self, *args, **kwargs):
super(LabelIntegerField, self).__init__(*args, **kwargs)
self.label = kwargs.get('label', '')
def widget_attrs(self, widget):
if self.label:
return {'label': u'%s' % self.label}
return {}
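# Illustrative usage (assumed): the field label travels through widget_attrs
# into the widget and is rendered as the leading placeholder option:
#
#     class ContactForm(forms.Form):
#         topic = LabelCharField(label=u'Topic')
#
#     # the select renders roughly as:
#     # <select name="topic"><option value="">- Topic -</option></select>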
|
woju/qubes-core-admin
|
tests/hardware.py
|
Python
|
lgpl-2.1
| 2,564
| 0.00078
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2016 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
#
import os
import qubes.tests
import time
import subprocess
from unittest import expectedFailure
class TC_00_HVM(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
def setUp(self):
super(TC_00_HVM, self).setUp()
self.vm = self.qc.add_new_vm("QubesHVm",
name=self.make_vm_name('vm1'))
self.vm.create_on_disk(verbose=False)
@expectedFailure
def test_000_pci_passthrough_presence(self):
pcidev = os.environ.get('QUBES_TEST_PCIDEV', None)
if pcidev is None:
self.skipTest('Specify PCI device with QUBES_TEST_PCIDEV '
'environment variable')
self.vm.pcidevs = [pcidev]
self.vm.pci_strictreset = False
self.qc.save()
self.qc.unlock_db()
init_script = (
"#!/bin/sh\n"
"set -e\n"
"lspci -n > /dev/xvdb\n"
"poweroff\n"
)
self.prepare_hvm_system_linux(self.vm, init_script,
['/usr/sbin/lspci'])
self.vm.start()
timeout = 60
while timeout > 0:
if not self.vm.is_running():
break
time.sleep(1)
timeout -= 1
if self.vm.is_running():
self.fail("Timeout while waiting for VM shutdown")
with open(self.vm.storage.private_img, 'r') as f:
lspci_vm = f.read(512).strip('\0')
p = subprocess.Popen(['lspci', '-ns', pcidev], stdout=subprocess.PIPE)
(lspci_host, _) = p.communicate()
# strip BDF, as it is different in VM
pcidev_desc = ' '.join(lspci_host.strip().split(' ')[1:])
self.assertIn(pcidev_desc, lspci_vm)
|
adam111316/SickGear
|
lib/chardet/eucjpprober.py
|
Python
|
gpl-3.0
| 3,754
| 0.001332
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import ProbingState, MachineState
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJP_SM_MODEL
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
super(EUCJPProber, self).__init__()
self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
self.distribution_analyzer = EUCJPDistributionAnalysis()
self.context_analyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
super(EUCJPProber, self).reset()
self.context_analyzer.reset()
@property
def charset_name(self):
return "EUC-JP"
def feed(self, byte_str):
for i in range(len(byte_str)):
# PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
coding_state = self.coding_sm.next_state(byte_str[i])
if coding_state == MachineState.error:
self.logger.debug('%s prober hit error at byte %s',
self.charset_name, i)
self._state = ProbingState.not_me
break
elif coding_state == MachineState.its_me:
self._state = ProbingState.found_it
break
elif coding_state == MachineState.start:
char_len = self.coding_sm.get_current_charlen()
if i == 0:
self._last_char[1] = byte_str[0]
self.context_analyzer.feed(self._last_char, char_len)
self.distribution_analyzer.feed(self._last_char, char_len)
else:
self.context_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self._last_char[0] = byte_str[-1]
if self.state == ProbingState.detecting:
if (self.context_analyzer.got_enough_data() and
(self.get_confidence() > self.SHORTCUT_THRESHOLD)):
self._state = ProbingState.found_it
return self.state
def get_confidence(self):
context_conf = self.context_analyzer.get_confidence()
distrib_conf = self.distribution_analyzer.get_confidence()
return max(context_conf, distrib_conf)
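# Illustrative usage (assumed driver loop; chardet normally feeds probers
# from UniversalDetector):
#
#     prober = EUCJPProber()
#     state = prober.feed(b'\xb0\xa1\xb0\xa2')  # valid EUC-JP byte pairs
#     prober.charset_name      # -> "EUC-JP"
#     prober.get_confidence()  # float in [0.0, 1.0]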
|
mattseymour/django
|
tests/serializers/models/base.py
|
Python
|
bsd-3-clause
| 3,903
| 0.000769
|
"""
Serialization
``django.core.serializers`` provides interfaces for converting Django
``QuerySet`` objects to and from "flat" data (i.e. strings).
"""
from decimal import Decimal
from django.db import models
class CategoryMetaDataManager(models.Manager):
def get_by_natural_key(self, kind, name):
return self.get(kind=kind, name=name)
class CategoryMetaData(models.Model):
kind = models.CharField(max_length=10)
name = models.CharField(max_length=10)
value = models.CharField(max_length=10)
objects = CategoryMetaDataManager()
class Meta:
unique_together = (('kind', 'name'),)
def __str__(self):
return '[%s:%s]=%s' % (self.kind, self.name, self.value)
def natural_key(self):
return (self.kind, self.name)
class Category(models.Model):
name = models.CharField(max_length=20)
meta_data = models.ForeignKey(CategoryMetaData, models.SET_NULL, null=True, default=None)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Article(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
headline = models.CharField(max_length=50)
pub_date = models.DateTimeField()
categories = models.ManyToManyField(Category)
meta_data = models.ManyToManyField(CategoryMetaData)
class Meta:
ordering = ('pub_date',)
def __str__(self):
return self.headline
class AuthorProfile(models.Model):
author = models.OneToOneField(Author, models.CASCADE, primary_key=True)
date_of_birth = models.DateField()
def __str__(self):
return "Profile of %s" % self.author
class Actor(models.Model):
    name = models.CharField(max_length=20, primary_key=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Movie(models.Model):
actor = models.ForeignKey(Actor, models.CASCADE)
title = models.CharField(max_length=50)
price = models.DecimalField(max_digits=6, decimal_places=2, default=Decimal('0.00'))
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
class Score(models.Model):
score = models.FloatField()
class Team:
def __init__(self, title):
self.title = title
def __str__(self):
raise NotImplementedError("Not so simple")
def to_string(self):
return "%s" % self.title
class TeamField(models.CharField):
def __init__(self):
super(TeamField, self).__init__(max_length=100)
def get_db_prep_save(self, value, connection):
return str(value.title)
def to_python(self, value):
if isinstance(value, Team):
return value
return Team(value)
def from_db_value(self, value, expression, connection, context):
return Team(value)
def value_to_string(self, obj):
return self.value_from_object(obj).to_string()
def deconstruct(self):
name, path, args, kwargs = super(TeamField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
class Player(models.Model):
name = models.CharField(max_length=50)
rank = models.IntegerField()
team = TeamField()
def __str__(self):
return '%s (%d) playing for %s' % (self.name, self.rank, self.team.to_string())
class BaseModel(models.Model):
parent_data = models.IntegerField()
class ProxyBaseModel(BaseModel):
class Meta:
proxy = True
class ProxyProxyBaseModel(ProxyBaseModel):
class Meta:
proxy = True
class ComplexModel(models.Model):
field1 = models.CharField(max_length=10)
field2 = models.CharField(max_length=10)
field3 = models.CharField(max_length=10)
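# Illustrative usage (assumed, standard Django natural-key serialization):
# CategoryMetaDataManager.get_by_natural_key plus natural_key above allow
# dumping and loading without hard-coded primary keys:
#
#     from django.core import serializers
#     data = serializers.serialize('json', Category.objects.all(),
#                                  use_natural_foreign_keys=True)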
|
okuta/chainer
|
chainer/datasets/text_dataset.py
|
Python
|
mit
| 6,272
| 0
|
import io
import sys
import threading
import six
from chainer.dataset import dataset_mixin
class TextDataset(dataset_mixin.DatasetMixin):
"""Dataset of a line-oriented text file.
This dataset reads each line of text file(s) on every call of the
:meth:`__getitem__` operator.
    Positions of line boundaries are cached so that you can quickly
    random-access the text file by the line number.
.. note::
Cache will be built in the constructor.
You can pickle and unpickle the dataset to reuse the cache, but in
        that case you are responsible for guaranteeing that the files are not
        modified after the cache has been built.
Args:
paths (str or list of str):
Path to the text file(s).
If it is a string, this dataset reads a line from the text file
and emits it as :class:`str`.
If it is a list of string, this dataset reads lines from each
text file and emits it as a tuple of :class:`str`. In this case,
number of lines in all files must be the same.
encoding (str or list of str):
Name of the encoding used to decode the file.
See the description in :func:`open` for the supported options and
how it works.
When reading from multiple text files, you can also pass a list of
:class:`str` to use different encoding for each file.
errors (str or list of str):
String that specifies how decoding errors are to be handled.
See the description in :func:`open` for the supported options and
how it works.
When reading from multiple text files, you can also pass a list of
:class:`str` to use different error handling policy for each file.
newline (str or list of str):
Controls how universal newlines mode works.
See the description in :func:`open` for the supported options and
how it works.
When reading from multiple text files, you can also pass a list of
:class:`str` to use different mode for each file.
filter_func (callable):
Function to filter each line of the text file.
            It should be a function that takes as many arguments as the
            number of files. Arguments are lines loaded from each file.
The filter function must return True to accept the line, or
return False to skip the line.
"""
def __init__(
self, paths, encoding=None, errors=None, newline=None,
filter_func=None):
if isinstance(paths, six.string_types):
paths = [paths]
elif not paths:
raise ValueError('at least one text file must be specified')
if isinstance(encoding, six.string_types) or encoding is None:
encoding = [encoding] * len(paths)
if isinstance(errors, six.string_types) or errors is None:
errors = [errors] * len(paths)
if isinstance(newline, six.string_types) or newline is None:
newline = [newline] * len(paths)
if not (len(paths) == len(encoding) == len(errors) == len(newline)):
raise ValueError(
                'length of each option must match with the number of '
'text files to read')
self._paths = paths
self._encoding = encoding
self._errors = errors
self._newline = newline
self._fps = None
self._open()
# Line number is 0-origin.
        # `lines` is a list of line numbers that were not filtered out; if no
        # filter_func is given, it is range(linenum).
# `bounds` is a list of cursor positions of line boundaries for each
# file, i.e. i-th line of k-th file starts at `bounds[k][i]`.
linenum = 0
lines = []
bounds = tuple([[0] for _ in self._fps])
while True:
data = [fp.readline() for fp in self._fps]
if not all(data): # any of files reached EOF
if any(data): # not all files reached EOF
raise ValueError(
'number of lines in files does not match')
break
for i, fp in enumerate(self._fps):
bounds[i].append(fp.tell())
if filter_func is not None and filter_func(*data):
lines.append(linenum)
linenum += 1
if filter_func is None:
lines = six.moves.range(linenum)
self._bounds = bounds
self._lines = lines
self._lock = threading.Lock()
def __getstate__(self):
state = self.__dict__.copy()
del state['_fps']
del state['_lock']
return state
def __setstate__(self, state):
self.__dict__ = state
self._open()
self._lock = threading.Lock()
def __len__(self):
return len(self._lines)
def _open(self):
self._fps = [
io.open(
path,
mode='rt',
encoding=encoding,
errors=errors,
newline=newline,
) for path, encoding, errors, newline in
six.moves.zip(self._paths, self._encoding, self._errors,
self._newline)
]
def close(self):
"""Manually closes all text files.
        In most cases, you do not have to call this method, because files will
        automatically be closed after the TextDataset instance goes out of scope.
"""
exc = None
for fp in self._fps:
try:
fp.close()
except Exception:
exc = sys.exc_info()
if exc is not None:
six.reraise(*exc)
def get_example(self, idx):
if idx < 0 or len(self._lines) <= idx:
raise IndexError
linenum = self._lines[idx]
self._lock.acquire()
try:
for k, fp in enumerate(self._fps):
fp.seek(self._bounds[k][linenum])
lines = [fp.readline() for fp in self._fps]
if len(lines) == 1:
return lines[0]
return tuple(lines)
finally:
self._lock.release()
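# Illustrative usage (assumed file names; follows the docstring above):
#
#     dataset = TextDataset(['source.txt', 'target.txt'],
#                           filter_func=lambda s, t: bool(s.strip()))
#     src, tgt = dataset[0]  # tuple of str, one line per file
#     len(dataset)           # number of line pairs kept by filter_func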
|
fbradyirl/home-assistant
|
homeassistant/components/http/auth.py
|
Python
|
apache-2.0
| 7,407
| 0.001215
|
"""Authentication for HTTP component."""
import base64
import logging
from aiohttp import hdrs
from aiohttp.web import middleware
import jwt
from homeassistant.auth.providers import legacy_api_password
from homeassistant.auth.util import generate_secret
from homeassistant.const import HTTP_HEADER_HA_AUTH
from homeassistant.core import callback
from homeassistant.util import dt as dt_util
from .const import KEY_AUTHENTICATED, KEY_HASS_USER, KEY_REAL_IP
_LOGGER = logging.getLogger(__name__)
DATA_API_PASSWORD = "api_password"
DATA_SIGN_SECRET = "http.auth.sign_secret"
SIGN_QUERY_PARAM = "authSig"
@callback
def async_sign_path(hass, refresh_token_id, path, expiration):
"""Sign a path for temporary access without auth header."""
secret = hass.data.get(DATA_SIGN_SECRET)
if secret is None:
secret = hass.data[DATA_SIGN_SECRET] = generate_secret()
now = dt_util.utcnow()
return "{}?{}={}".format(
path,
SIGN_QUERY_PARAM,
jwt.encode(
{
"iss": refresh_token_id,
"path": path,
"iat": now,
"exp": now + expiration,
},
secret,
algorithm="HS256",
).decode(),
)
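# Illustrative usage (assumed call site): create a short-lived signed URL
# that async_validate_signed_request below will accept without a header:
#
#     from datetime import timedelta
#     url = async_sign_path(hass, refresh_token.id,
#                           "/api/camera_proxy/camera.front",
#                           timedelta(seconds=30))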
@callback
def setup_auth(hass, app):
"""Create auth middleware for the app."""
old_auth_warning = set()
support_legacy = hass.auth.support_legacy
if support_legacy:
_LOGGER.warning("legacy_api_password support has been enabled.")
trusted_networks = []
for prv in hass.auth.auth_providers:
if prv.type == "trusted_networks":
trusted_networks += prv.trusted_networks
async def async_validate_auth_header(request):
"""
Test authorization header against access token.
Basic auth_type is legacy code, should be removed with api_password.
"""
try:
auth_type, auth_val = request.headers.get(hdrs.AUTHORIZATION).split(" ", 1)
except ValueError:
# If no space in authorization header
return False
if auth_type == "Bearer":
refresh_token = await hass.auth.async_validate_access_token(auth_val)
if refresh_token is None:
return False
request[KEY_HASS_USER] = refresh_token.user
return True
if auth_type == "Basic" and support_legacy:
decoded = base64.b64decode(auth_val).decode("utf-8")
try:
username, password = decoded.split(":", 1)
except ValueError:
# If no ':' in decoded
return False
if username != "homeassistant":
return False
user = await legacy_api_password.async_validate_password(hass, password)
if user is None:
return False
request[KEY_HASS_USER] = user
_LOGGER.info(
"Basic auth with api_password is going to deprecate,"
" please use a bearer token to access %s from %s",
request.path,
request[KEY_REAL_IP],
)
old_auth_warning.add(request.path)
return True
return False
async def async_validate_signed_request(request):
"""Validate a signed request."""
secret = hass.data.get(DATA_SIGN_SECRET)
if secret is None:
return False
signature = request.query.get(SIGN_QUERY_PARAM)
if signature is None:
return False
try:
claims = jwt.decode(
signature, secret, algorithms=["HS256"], options={"verify_iss": False}
)
except jwt.InvalidTokenError:
return False
if claims["path"] != request.path:
return False
refresh_token = await hass.auth.async_get_refresh_token(claims["iss"])
if refresh_token is None:
return False
request[KEY_HASS_USER] = refresh_token.user
return True
async def async_validate_trusted_networks(request):
"""Test if request is from a trusted ip."""
ip_addr = request[KEY_REAL_IP]
if not any(ip_addr in trusted_network for trusted_network in trusted_networks):
return False
user = await hass.auth.async_get_owner()
if user is None:
return False
request[KEY_HASS_USER] = user
return True
async def async_validate_legacy_api_password(request, password):
"""Validate api_password."""
user = await legacy_api_password.async_validate_password(hass, password)
if user is None:
return False
request[KEY_HASS_USER] = user
return True
@middleware
async def auth_middleware(request, handler):
"""Authenticate as middleware."""
authenticated = False
if HTTP_HEADER_HA_AUTH in request.headers or DATA_API_PASSWORD in request.query:
if request.path not in old_auth_warning:
_LOGGER.log(
logging.INFO if support_legacy else logging.WARNING,
"api_password is going to deprecate. You need to use a"
" bearer token to access %s from %s",
request.path,
request[KEY_REAL_IP],
)
old_auth_warning.add(request.path)
if hdrs.AUTHORIZATION in request.headers and await async_validate_auth_header(
request
):
# it included both use_auth and api_password Basic auth
authenticated = True
# We first start with a string check to avoid parsing query params
# for every request.
elif (
request.method == "GET"
and SIGN_QUERY_PARAM in request.query
and await async_validate_signed_request(request)
):
authenticated = True
elif trusted_networks and await async_validate_trusted_networks(request):
if request.path not in old_auth_warning:
# When removing this, don't forget to remove the print logic
# in http/view.py
request["deprecate_warning_message"] = (
"Access from trusted networks without auth token is "
"going to be removed in Home Assistant 0.96. Configure "
"the trusted networks auth provider or use long-lived "
"access tokens to access {} from {}".format(
request.path, request[KEY_REAL_IP]
)
)
old_auth_warning.add(request.path)
authenticated = True
elif (
support_legacy
and HTTP_HEADER_HA_AUTH in request.headers
and await async_validate_legacy_api_password(
request, request.headers[HTTP_HEADER_HA_AUTH]
)
):
authenticated = True
elif (
support_legacy
and DATA_API_PASSWORD in request.query
and await async_validate_legacy_api_password(
request, request.query[DATA_API_PASSWORD]
)
):
authenticated = True
request[KEY_AUTHENTICATED] = authenticated
return await handler(request)
app.middlewares.append(auth_middleware)
|
timwaizenegger/swift-bluebox
|
_runApp_Development_nodebug.py
|
Python
|
mit
| 442
| 0.011312
|
# -*- coding: utf-8 -*-
"""
Project Bluebox
Copyright (C) <2015> <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
from mcm.Bluebox import app
from mcm.Bluebox import configuration
# socketio.run(
# app,
app.run(
host=configuration.my_bind_host,
port=int(configuration.my_endpoint_port),
debug=False,
threaded=True
)
| |
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/model_utils/models.py
|
Python
|
agpl-3.0
| 3,021
| 0
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import FieldDoesNotExist
from django.core.exceptions import ImproperlyConfigured
from django.utils.timezone import now
from model_utils.managers import QueryManager
from model_utils.fields import AutoCreatedField, AutoLastModifiedField, \
StatusField, MonitorField
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields.
"""
created = AutoCreatedField(_('created'))
modified = AutoLastModifiedField(_('modified'))
class Meta:
abstract = True
class TimeFramedModel(models.Model):
"""
An abstract base class model that provides ``start``
and ``end`` fields to record a timeframe.
"""
start = models.DateTimeField(_('start'), null=True, blank=True)
end = models.DateTimeField(_('end'), null=True, blank=True)
class Meta:
abstract = True
class StatusModel(models.Model):
"""
An abstract base class model with a ``status`` field that
automatically uses a ``STATUS`` class attribute of choices, a
``status_changed`` date-time field that records when ``status``
    was last modified, and an automatically-added manager for each
status that returns objects with that status only.
"""
status = StatusField(_('status'))
    status_changed = MonitorField(_('status changed'), monitor='status')
class Meta:
abstract = True
def add_status_query_managers(sender, **kwargs):
"""
    Add a QueryManager for each status item dynamically.
"""
if not issubclass(sender, StatusModel):
return
for value, display in getattr(sender, 'STATUS', ()):
if _field_exists(sender, value):
raise ImproperlyConfigured(
"StatusModel: Model '%s' has a field named '%s' which "
"conflicts with a status of the same name."
% (sender.__name__, value)
)
sender.add_to_class(value, QueryManager(status=value))
def add_timeframed_query_manager(sender, **kwargs):
"""
Add a QueryManager for a specific timeframe.
"""
if not issubclass(sender, TimeFramedModel):
return
if _field_exists(sender, 'timeframed'):
raise ImproperlyConfigured(
"Model '%s' has a field named 'timeframed' "
"which conflicts with the TimeFramedModel manager."
% sender.__name__
)
sender.add_to_class('timeframed', QueryManager(
(models.Q(start__lte=now) | models.Q(start__isnull=True)) &
(models.Q(end__gte=now) | models.Q(end__isnull=True))
))
models.signals.class_prepared.connect(add_status_query_managers)
models.signals.class_prepared.connect(add_timeframed_query_manager)
def _field_exists(model_class, field_name):
return field_name in [f.attname for f in model_class._meta.local_fields]
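# Illustrative usage (assumed, the documented model_utils pattern): each
# STATUS choice becomes a per-status manager via the signal handlers above:
#
#     from model_utils import Choices
#
#     class Article(StatusModel):
#         STATUS = Choices('draft', 'published')
#
#     Article.draft.all()      # only rows with status == 'draft'
#     Article.published.all()  # only rows with status == 'published'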
|
nuxgu/magic_db
|
sqldb/card_dao.py
|
Python
|
gpl-3.0
| 1,343
| 0.001489
|
import logging
from dao import DAO, TableDesc, FieldDesc
log = logging.getLogger(__name__)
card_td = TableDesc("Cards", "multiverseid",
[FieldDesc("multiverseid", "int"),
FieldDesc("set_code", "text"),
FieldDesc("number", "int"),
FieldDesc("name", "text"),
FieldDesc("language", "text"),
FieldDesc("translation_of", "int"),
FieldDesc("back_face_of", "int"),
FieldDesc("equivalent_to", "int")])
class CardDAO(DAO):
@staticmethod
    def create_table(conn):
card_td.create_table(conn)
def __init__(self, card, conn):
super(CardDAO, self).__init__(card_td, conn)
self.card = card
def get_pkey(self):
return self.card.multiverseid
def get_values(self):
return [self.card.multiverseid,
self.card.set_code,
self.card.number,
self.card.name.decode('utf-8'),
self.card.language,
self.card.translation_of.multiverseid if self.card.translation_of else None,
self.card.back_face_of.multiverseid if self.card.back_face_of else None,
self.card.equivalent_to]
def __str__(self):
return str(self.card)
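# Illustrative usage sketch (Card construction and the save entry point are
# assumed; they live elsewhere in this package):
#
#     import sqlite3
#     conn = sqlite3.connect('magic.db')
#     CardDAO.create_table(conn)
#     CardDAO(card, conn).get_values()  # row values in card_td field order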
|
patrickm/chromium.src
|
tools/telemetry/telemetry/core/timeline/model_unittest.py
|
Python
|
bsd-3-clause
| 529
| 0.003781
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.timeline import model
from telemetry.core.backends.chrome import tracing_timeline_data
class TimelineModelUnittest(unittest.TestCase):
def testEmptyImport(self):
model.TimelineModel(
tracing_timeline_data.TracingTimelineData([]))
model.TimelineModel(
tracing_timeline_data.TracingTimelineData(''))
|