| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
feureau/Small-Scripts
|
Blender/Blender config/2.91/scripts/addons/bricksculpt_v1-2-0/classes/bricksculpt_choose_paintbrush_material.py
|
1
|
2230
|
# Copyright (C) 2019 Christopher Gearhart
# chris@bblanimation.com
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
import bmesh
import math
import importlib
# Blender imports
import bpy
import bgl
from bpy.types import Operator
from bpy.props import *
# Module imports
from .bricksculpt_framework import *
from .bricksculpt_tools import *
from .bricksculpt_drawing import *
from ..functions import *
class BRICKSCULPT_OT_choose_paintbrush_material(Operator):
"""Choose the material of the active BrickSculpt paintbrush tool"""
bl_idname = "bricksculpt.choose_paintbrush_material"
bl_label = "Choose Paintbrush Material"
bl_options = {"REGISTER", "INTERNAL"}
################################################
# Blender Operator methods
@classmethod
def poll(cls, context):
scn = bpy.context.scene
return scn.bricksculpt.running_active_session
def execute(self, context):
scn = context.scene
scn.bricksculpt.choosing_material = False
return {"FINISHED"}
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)#, event)
def draw(self, context):
scn = context.scene
layout = self.layout
layout.prop(scn.bricksculpt, "paintbrush_mat")
###################################################
# initialization method
def __init__(self):
bpy.context.window.cursor_set("DEFAULT")
###################################################
# class variables
# NONE!
###################################################
|
gpl-3.0
| -3,855,508,242,255,955,000
| 29.135135
| 72
| 0.645291
| false
| 4.121996
| false
| false
| false
|
ComputerArchitectureGroupPWr/Floorplan-Maker
|
src/FloorplanMakerUI_old.py
|
1
|
15396
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_window.ui'
#
# Created: Tue Aug 5 12:46:39 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from floorplanFrameBeh import FloorplanFrame
from heatersTable import HeatersTable
from thermometersTable import ThermometersTable
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_FloorplanMaker(object):
def setupUi(self, FloorplanMaker):
FloorplanMaker.setObjectName(_fromUtf8("FloorplanMaker"))
FloorplanMaker.resize(1200,700)
self.window = FloorplanMaker
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("../../icona.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
FloorplanMaker.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(FloorplanMaker)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.thermometers_list = []
self.horizontalLayout_4 = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.heaterTabLab = QtGui.QLabel(self.centralwidget)
self.heaterTabLab.setObjectName(_fromUtf8("heaterTabLab"))
self.verticalLayout_3.addWidget(self.heaterTabLab)
self.heaterTable = HeatersTable(self.centralwidget)
self.verticalLayout_3.addWidget(self.heaterTable)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btnAddHeater = QtGui.QPushButton(self.centralwidget)
self.btnAddHeater.setMaximumSize(QtCore.QSize(71, 27))
self.btnAddHeater.setObjectName(_fromUtf8("btnAddHeater"))
self.horizontalLayout.addWidget(self.btnAddHeater)
self.btnModifyHeater = QtGui.QPushButton(self.centralwidget)
self.btnModifyHeater.setMaximumSize(QtCore.QSize(91, 27))
self.btnModifyHeater.setObjectName(_fromUtf8("btnModifyHeater"))
self.horizontalLayout.addWidget(self.btnModifyHeater)
self.btnDeleteHeater = QtGui.QPushButton(self.centralwidget)
self.btnDeleteHeater.setMaximumSize(QtCore.QSize(81, 27))
self.btnDeleteHeater.setObjectName(_fromUtf8("btnDeleteHeater"))
self.horizontalLayout.addWidget(self.btnDeleteHeater)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.label = QtGui.QLabel(self.centralwidget)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout_3.addWidget(self.label)
self.tableThermometers = ThermometersTable(self.centralwidget)
self.verticalLayout_3.addWidget(self.tableThermometers)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.btnAddTherm = QtGui.QPushButton(self.centralwidget)
self.btnAddTherm.setMaximumSize(QtCore.QSize(81, 27))
self.btnAddTherm.setObjectName(_fromUtf8("btnAddTherm"))
self.horizontalLayout_2.addWidget(self.btnAddTherm)
self.btnModifyTherm = QtGui.QPushButton(self.centralwidget)
self.btnModifyTherm.setMaximumSize(QtCore.QSize(91, 27))
self.btnModifyTherm.setObjectName(_fromUtf8("btnModifyTherm"))
self.horizontalLayout_2.addWidget(self.btnModifyTherm)
self.btnDelete = QtGui.QPushButton(self.centralwidget)
self.btnDelete.setMaximumSize(QtCore.QSize(81, 27))
self.btnDelete.setObjectName(_fromUtf8("btnDelete"))
self.horizontalLayout_2.addWidget(self.btnDelete)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.gridLayout.setSpacing(6)
self.gridLayout.setContentsMargins(0, 20, 0, 20)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.thermsInRowEdit = QtGui.QLineEdit(self.centralwidget)
self.thermsInRowEdit.setMaximumSize(QtCore.QSize(100, 50))
self.thermsInRowEdit.setObjectName(_fromUtf8("thermsInRowEdit"))
self.gridLayout.addWidget(self.thermsInRowEdit, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setMaximumSize(QtCore.QSize(200, 16777215))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_2.setText("Thermometers in row: ")
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.columnsInRowEdit = QtGui.QLineEdit(self.centralwidget)
self.columnsInRowEdit.setMaximumSize(QtCore.QSize(100, 16777215))
self.columnsInRowEdit.setObjectName(_fromUtf8("columnsInRowEdit"))
self.gridLayout.addWidget(self.columnsInRowEdit, 1, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setMaximumSize(QtCore.QSize(200, 16777215))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_3.setText("Thermometers in column: ")
self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
self.generateButton = QtGui.QPushButton(self.centralwidget)
self.generateButton.setMaximumSize(QtCore.QSize(100, 16777215))
self.generateButton.setObjectName(_fromUtf8("generateButton"))
self.generateButton.setText("Generate")
self.gridLayout.addWidget(self.generateButton, 2, 0, 1, 1)
self.generateActionCombo = QtGui.QComboBox(self.centralwidget)
self.generateActionCombo.setObjectName(_fromUtf8("generateActionCombo"))
self.generateActionCombo.addItem(_fromUtf8("Linear"))
self.generateActionCombo.addItem(_fromUtf8("Net"))
self.gridLayout.addWidget(self.generateActionCombo, 2, 1, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout)
spacerItem = QtGui.QSpacerItem(20, 118, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem)
self.horizontalLayout_3.addLayout(self.verticalLayout_3)
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.floorplanLab = QtGui.QLabel(self.centralwidget)
self.floorplanLab.setObjectName(_fromUtf8("floorplanLab"))
self.verticalLayout_4.addWidget(self.floorplanLab)
self.floorplanScrollArea = QtGui.QScrollArea(self.centralwidget)
self.floorplanScrollArea.setMinimumSize(QtCore.QSize(120, 160))
self.floorplanScrollArea.setMaximumSize(QtCore.QSize(725, 16777215))
self.floorplanScrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.floorplanScrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.floorplanScrollArea.setWidgetResizable(True)
self.floorplanScrollArea.setObjectName(_fromUtf8("floorplanScrollArea"))
self.scrollAreaWidgetContents_2 = QtGui.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 584, 701))
self.scrollAreaWidgetContents_2.setObjectName(_fromUtf8("scrollAreaWidgetContents_2"))
self.verticalLayoutWidget = QtGui.QWidget(self.scrollAreaWidgetContents_2)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 481, 591))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.widget = QtGui.QWidget()
self.widgetLayout = QtGui.QHBoxLayout()
self.FrameFloorplan = QtGui.QFrame()
self.FrameFloorplan.setMinimumSize(690,920)
self.FloorPlanFrame = FloorplanFrame(self.FrameFloorplan, self)
self.FloorPlanFrame.setMinimumSize(690,920)
self.FloorPlanFrame.setStyleSheet(_fromUtf8("border: 2px solid black;\n"
"border-radius: 4px;\n"
"padding: 2px;\n"
"background-color: rgb(194, 194, 194)\n"
""))
self.FloorPlanFrame.setFrameShape(QtGui.QFrame.StyledPanel)
self.FloorPlanFrame.setFrameShadow(QtGui.QFrame.Raised)
self.FloorPlanFrame.setObjectName(_fromUtf8("FloorPlanFrame"))
self.widgetLayout.addWidget(self.FrameFloorplan)
self.widget.setLayout(self.widgetLayout)
self.floorplanScrollArea.setWidget(self.widget)
FloorplanMaker.setCentralWidget(self.centralwidget)
self.verticalLayout_4.addWidget(self.floorplanScrollArea)
self.horizontalLayout_3.addLayout(self.verticalLayout_4)
self.horizontalLayout_3.setStretch(0, 2)
self.horizontalLayout_3.setStretch(1, 4)
self.horizontalLayout_4.addLayout(self.horizontalLayout_3)
FloorplanMaker.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(FloorplanMaker)
self.menubar.setGeometry(QtCore.QRect(0, 0, 997, 29))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuPlik = QtGui.QMenu(self.menubar)
self.menuPlik.setObjectName(_fromUtf8("menuPlik"))
self.menuNarz_dzia = QtGui.QMenu(self.menubar)
self.menuNarz_dzia.setObjectName(_fromUtf8("menuNarz_dzia"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
FloorplanMaker.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(FloorplanMaker)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
FloorplanMaker.setStatusBar(self.statusbar)
self.actionOpen_project = QtGui.QAction(FloorplanMaker)
self.actionOpen_project.setObjectName(_fromUtf8("actionOpen_project"))
self.actionSave_project = QtGui.QAction(FloorplanMaker)
self.actionSave_project.setObjectName(_fromUtf8("actionSave_project"))
self.actionClose = QtGui.QAction(FloorplanMaker)
self.actionClose.setObjectName(_fromUtf8("actionClose"))
self.actionNewProject = QtGui.QAction(FloorplanMaker)
self.actionNewProject.setObjectName(_fromUtf8("actionNewProject"))
self.actionOpenProject = QtGui.QAction(FloorplanMaker)
self.actionOpenProject.setObjectName(_fromUtf8("actionOpenProject"))
self.actionSave = QtGui.QAction(FloorplanMaker)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
self.actionEnd = QtGui.QAction(FloorplanMaker)
self.actionEnd.setObjectName(_fromUtf8("actionEnd"))
self.actionGeneriloFile = QtGui.QAction(FloorplanMaker)
self.actionGeneriloFile.setObjectName(_fromUtf8("actionGeneriloFile"))
self.actionAbout = QtGui.QAction(FloorplanMaker)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.menuPlik.addAction(self.actionNewProject)
self.menuPlik.addAction(self.actionOpenProject)
self.menuPlik.addAction(self.actionSave)
self.menuPlik.addSeparator()
self.menuPlik.addAction(self.actionEnd)
self.menuNarz_dzia.addAction(self.actionGeneriloFile)
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuPlik.menuAction())
self.menubar.addAction(self.menuNarz_dzia.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(FloorplanMaker)
QtCore.QObject.connect(self.actionEnd, QtCore.SIGNAL(_fromUtf8("triggered()")), FloorplanMaker.close)
QtCore.QMetaObject.connectSlotsByName(FloorplanMaker)
def retranslateUi(self, FloorplanMaker):
FloorplanMaker.setWindowTitle(QtGui.QApplication.translate("FloorplanMaker", "FloorplanMaker", None, QtGui.QApplication.UnicodeUTF8))
self.heaterTabLab.setText(QtGui.QApplication.translate("FloorplanMaker", "Placed heaters:", None, QtGui.QApplication.UnicodeUTF8))
self.btnAddHeater.setText(QtGui.QApplication.translate("FloorplanMaker", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.btnModifyHeater.setText(QtGui.QApplication.translate("FloorplanMaker", "Modify", None, QtGui.QApplication.UnicodeUTF8))
self.btnDeleteHeater.setText(QtGui.QApplication.translate("FloorplanMaker", "Delete", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("FloorplanMaker", "Placed thermometers:", None, QtGui.QApplication.UnicodeUTF8))
self.btnAddTherm.setText(QtGui.QApplication.translate("FloorplanMaker", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.btnModifyTherm.setText(QtGui.QApplication.translate("FloorplanMaker", "Modify", None, QtGui.QApplication.UnicodeUTF8))
self.btnDelete.setText(QtGui.QApplication.translate("FloorplanMaker", "Delete", None, QtGui.QApplication.UnicodeUTF8))
self.floorplanLab.setText(QtGui.QApplication.translate("FloorplanMaker", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:11pt; font-weight:600; font-style:italic;\">Project Floorplan:</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.menuPlik.setTitle(QtGui.QApplication.translate("FloorplanMaker", "File", None, QtGui.QApplication.UnicodeUTF8))
self.menuNarz_dzia.setTitle(QtGui.QApplication.translate("FloorplanMaker", "Tools", None, QtGui.QApplication.UnicodeUTF8))
self.menuHelp.setTitle(QtGui.QApplication.translate("FloorplanMaker", "Help", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen_project.setText(QtGui.QApplication.translate("FloorplanMaker", "Otwórz projekt", None, QtGui.QApplication.UnicodeUTF8))
self.actionSave_project.setText(QtGui.QApplication.translate("FloorplanMaker", "Zapisz projekt", None, QtGui.QApplication.UnicodeUTF8))
self.actionClose.setText(QtGui.QApplication.translate("FloorplanMaker", "Zamknij", None, QtGui.QApplication.UnicodeUTF8))
self.actionNewProject.setText(QtGui.QApplication.translate("FloorplanMaker", "New project", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpenProject.setText(QtGui.QApplication.translate("FloorplanMaker", "Open project", None, QtGui.QApplication.UnicodeUTF8))
self.actionSave.setText(QtGui.QApplication.translate("FloorplanMaker", "Save", None, QtGui.QApplication.UnicodeUTF8))
self.actionEnd.setText(QtGui.QApplication.translate("FloorplanMaker", "Close", None, QtGui.QApplication.UnicodeUTF8))
self.actionGeneriloFile.setText(QtGui.QApplication.translate("FloorplanMaker", "Create generilo file", None, QtGui.QApplication.UnicodeUTF8))
self.actionAbout.setText(QtGui.QApplication.translate("FloorplanMaker", "About", None, QtGui.QApplication.UnicodeUTF8))
|
mit
| 7,286,286,362,284,666,000
| 66.227074
| 275
| 0.725235
| false
| 3.810644
| false
| false
| false
|
Fiware/dataModels
|
tools/ldcontext_generator.py
|
1
|
9962
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script provides two files:
- context.jsonld, that serves https://schema.lab.fiware.org/ld/fiware-data-models-context.jsonld
- mapping_list.yml, that serves https://uri.fiware.org/ns/data-models
context.jsonld is combined by extracting the properties, types and enumerations of a JSON Schema and
converting them into terms of a JSON-LD @Context. mapping_list.yml uses the result of extracting
to prepare a list of terms with schemas and specifications.
Copyright (c) 2019 FIWARE Foundation e.V.
Authors: José M. Cantera, Dmitrii Demin
"""
import json
import yaml
import os
from datetime import datetime, timezone
from argparse import ArgumentParser
# The aggregated @context will be stored here
aggregated_context = {
}
# The list of mappings (term->schema/specification) will be stored here
terms_list = {
"terms": {}
}
# The list of terms alerts will be stored here (if the specification file
# associated with the term doesn't exist)
alert_list = [
]
# Template to prepare a valid URL of a schema for a term mapping
schema_url = 'https://fiware.github.io/data-models/{}'
specification_url = 'https://fiware-datamodels.readthedocs.io/en/latest/{}'
# Agri* schemas stored at another github organization
agri_url = 'https://github.com/GSMADeveloper/NGSI-LD-Entities/blob/master/definitions/{}.md'
# Used to detect attributes which are actually relationships
ENTITY_ID = 'https://fiware.github.io/data-models/common-schema.json#/definitions/EntityIdentifierType'
def read_json(infile):
with open(infile) as data_file:
data = json.loads(data_file.read())
return data
def write_json(data, outfile):
with open(outfile, 'w') as data_file:
data_file.write(json.dumps(data, indent=4, sort_keys=True))
data_file.write("\n")
def write_yaml(data, outfile):
with open(outfile, 'w') as data_file:
data_file.write(yaml.dump(data))
# Finds a node in a JSON Schema
# (previously parsed as a Python dictionary)
def find_node(schema, node_name):
result = None
if isinstance(schema, list):
for instance in schema:
res = find_node(instance, node_name)
if res is not None:
result = res
break
elif isinstance(schema, dict):
for member in schema:
if member == node_name:
result = schema[member]
break
else:
res = find_node(schema[member], node_name)
if res is not None:
result = res
break
return result
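# Illustrative usage of find_node, not part of the original script:
#     find_node({"allOf": [{"properties": {"x": {"type": "string"}}}]}, "properties")
# returns {"x": {"type": "string"}}: the helper walks nested lists and dicts and
# stops at the first node whose key matches node_name.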
# extracts the properties dictionary
# A list of dictionaries is returned
def extract_properties(schema):
properties = find_node(schema, 'properties')
out = []
if properties is None:
return out
for p in properties:
if p != "type" and p != "id":
prop = dict()
prop['type'] = 'Property'
prop['name'] = p
ref = find_node(properties[p], '$ref')
if ref is not None and ref == ENTITY_ID:
prop['type'] = 'Relationship'
enum = find_node(properties[p], 'enum')
if enum is not None:
prop['isEnumerated'] = True
pformat = find_node(properties[p], 'format')
if pformat is not None and pformat == 'date-time':
prop['isDate'] = True
out.append(prop)
return out
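# Illustrative result of extract_properties, not part of the original script: for the
# hypothetical fragment sketched above (refDevice referencing ENTITY_ID, dateObserved
# with format "date-time") it would return
#     [{'type': 'Relationship', 'name': 'refDevice'},
#      {'type': 'Property', 'name': 'dateObserved', 'isDate': True}]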
# extracts the entity type
def extract_entity_type(schema):
out = None
properties = find_node(schema, 'properties')
if properties is not None and 'type' in properties:
type_node = properties['type']
if 'enum' in type_node and len(type_node['enum']) > 0:
out = type_node['enum'][0]
return out
# extracts the enumerations
def extract_enumerations(schema):
out = []
properties = find_node(schema, 'properties')
if properties is None:
return out
for p in properties:
if p != 'type':
prop = properties[p]
enum = find_node(prop, 'enum')
if enum is not None:
if isinstance(enum, list):
for item in enum:
if isinstance(item, str):
out.append(item)
return out
# Generates the LD @context for a list of JSON Schema properties
# (which are attributes) with the URI prefix
def generate_ld_context_attrs(properties, uri_prefix, predefined_mappings):
context = {}
if properties is None:
return context
for p in properties:
p_name = p['name']
if p_name in predefined_mappings:
context[p_name] = predefined_mappings[p_name]
continue
if p['type'] == 'Relationship':
context[p_name] = {
'@type': '@id'
}
elif 'isDate' in p:
context[p_name] = {
'@type': 'https://uri.etsi.org/ngsi-ld/DateTime'
}
elif 'isEnumerated' in p:
context[p_name] = {
'@type': '@vocab'
}
if p_name in context:
context[p_name]['@id'] = uri_prefix + '#' + p_name
else:
context[p_name] = uri_prefix + '#' + p_name
return context
# Generates the LD @context for a list of JSON Schema properties
# (which are enumerated values) with the URI prefix
def generate_ld_context_enums(properties, uri_prefix, predefined_mappings):
context = {}
if properties is None:
return context
for p in properties:
if p in predefined_mappings:
context[p] = predefined_mappings[p]
else:
context[p] = uri_prefix + '#' + p
return context
# Extracts from the schema the relevant JSON-LD @context
def schema_2_ld_context(schema, uri_prefix, predefined_mappings):
properties = extract_properties(schema)
entity_type = extract_entity_type(schema)
enumerations = extract_enumerations(schema)
ld_context = dict()
ld_context['Attribute'] = generate_ld_context_attrs(
properties, uri_prefix, predefined_mappings)
ld_context['Enumeration Value'] = generate_ld_context_enums(
enumerations, uri_prefix, predefined_mappings)
ld_context['Entity Type'] = dict()
if entity_type is not None:
ld_context['Entity Type'][entity_type] = uri_prefix + '#' + entity_type
return ld_context
def process_file(input_file, uri_prefix, predefined_mappings, terms_mappings):
if os.path.isfile(input_file) and input_file.endswith('schema.json'):
print(input_file)
aggregate_ld_context(
input_file,
uri_prefix,
predefined_mappings,
terms_mappings)
elif os.path.isdir(input_file):
for f in (os.listdir(input_file)):
process_file(os.path.join(input_file, f),
uri_prefix, predefined_mappings, terms_mappings)
def aggregate_ld_context(f, uri_prefix, predefined_mappings, terms_mappings):
global aggregated_context
global terms_list
global alert_list
schema = read_json(f)
ld_context = schema_2_ld_context(schema, uri_prefix, predefined_mappings)
for t in ld_context:
for p in ld_context[t]:
aggregated_context[p] = ld_context[t][p]
# adding related specifications and schemas
if p not in terms_list['terms']:
terms_list['terms'][p] = {'specifications': list(),
'schemas': list(),
'type': t}
terms_list['terms'][p]['schemas'].append(
schema_url.format(f.split('../')[1]))
file_to_add = find_file(f, terms_mappings)
if file_to_add:
terms_list['terms'][p]['specifications'].append(file_to_add)
else:
alert_list.append(f)
# Finds the specification file associated with the term
def find_file(f, terms_mappings):
try:
spec1 = os.path.join(f.rsplit('/', 1)[0], 'doc/spec.md')
spec2 = os.path.join(f.rsplit('/', 1)[0], 'doc/introduction.md')
if os.path.isfile(spec1):
path = str(spec1.split('../specs/')
[1]).split('/spec.md')[0] + '/spec/'
return specification_url.format(path)
elif os.path.isfile(spec2):
path = str(spec2.split('../specs/')
[1]).split('/introduction.md')[0] + '/introduction/'
return specification_url.format(path)
elif 'AgriFood' in f:
agri_type = f.split('AgriFood/')[1].split('/schema.json')[0]
if agri_type in terms_mappings:
return agri_url.format(terms_mappings[agri_type])
else:
return None
else:
return None
except UnboundLocalError:
pass
def write_context_file():
print('writing LD @context...' + ' size: ' + str(len(aggregated_context)))
ld_context = {
'@context': aggregated_context,
'generatedAt': datetime.now(timezone.utc).replace(microsecond=0).isoformat()
}
write_json(ld_context, 'context.jsonld')
write_yaml(terms_list, 'terms_list.yml')
def main(args):
uri_prefix = args.u
predefined_mappings = read_json('ldcontext_mappings.json')
terms_mappings = read_json('ldcontext_terms_mappings.json')
process_file(args.f, uri_prefix, predefined_mappings, terms_mappings)
write_context_file()
print("specification file was not found for this files")
print("\n".join(sorted(set(alert_list))))
# Entry point
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('-f', required=True, help='folder')
parser.add_argument('-u', required=True, help='URI prefix')
arguments = parser.parse_args()
main(arguments)
|
mit
| 1,676,945,092,126,814,700
| 28.383481
| 103
| 0.598434
| false
| 3.903213
| false
| false
| false
|
google/cauliflowervest
|
cauliflowervest/client/mac/main.py
|
1
|
3039
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CauliflowerVest client main entry module."""
import os
import pwd
from absl import app
from absl import flags
from cauliflowervest.client import base_flags
from cauliflowervest.client.mac import commandline
from cauliflowervest.client.mac import glue
from cauliflowervest.client.mac import tkinter
flags.DEFINE_bool('welcome', True, 'Show welcome message.')
flags.DEFINE_string(
'username', None, 'Username to use by default.', short_name='u')
flags.DEFINE_enum(
'action', None, commandline.ALL_ACTIONS.keys(),
'Action to perform (also suppresses GUI)', short_name='a')
flags.DEFINE_string('volume', None, 'UUID of volume')
exit_status = 1
def run_command_line(username, options):
"""Runs CauliflowerVest in command-line mode."""
if options.login_type == 'oauth2':
cmd = commandline.CommandLineOAuth2(options.server_url, username)
else:
raise NotImplementedError('Unsupported login type: %s' % options.login_type)
return cmd.Execute(options.action, options.volume)
def run_tkinter_gui(username, options):
"""Runs CauliflowerVest with a Tkinter GUI."""
if options.login_type == 'oauth2':
gui = tkinter.GuiOauth(options.server_url, username)
else:
raise NotImplementedError('Unsupported login type: %s' % options.login_type)
storage = glue.GetStorage()
if not storage:
gui.ShowFatalError('Could not determine File System type')
return 1
_, encrypted_volumes, _ = storage.GetStateAndVolumeIds()
try:
if encrypted_volumes:
gui.EncryptedVolumePrompt(status_callback=status_callback)
else:
gui.PlainVolumePrompt(options.welcome, status_callback=status_callback)
except Exception as e: # pylint: disable=broad-except
gui.ShowFatalError(e)
return 1
finally:
return exit_status # pylint: disable=lost-exception
def status_callback(status):
"""Callback routine to be passed into the gui to set the exit status.
Args:
status: Boolean: success or failure
"""
global exit_status
if status:
exit_status = 0
else:
exit_status = 1
@base_flags.HandleBaseFlags
def main(options):
if options.username:
username = options.username
else:
username = pwd.getpwuid(os.getuid()).pw_name
if options.action:
return run_command_line(username, options)
else:
return run_tkinter_gui(username, options)
if __name__ == '__main__':
app.run(main)
|
apache-2.0
| -5,189,660,262,082,169,000
| 28.504854
| 77
| 0.715038
| false
| 3.784558
| false
| false
| false
|
solvo/derb
|
report_builder/migrations/0001_initial.py
|
1
|
11231
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-12 19:47
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('async_notifications', '0002_auto_20160515_0018'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('annotation', models.TextField(blank=True)),
('text', models.TextField(blank=True)),
('display_text', models.TextField(blank=True)),
],
options={
'verbose_name_plural': 'Answers',
'verbose_name': 'Answer',
},
),
migrations.CreateModel(
name='Observation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('context', models.TextField()),
('aproved', models.BooleanField(default=False)),
('answer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Answer')),
],
options={
'verbose_name_plural': 'Observations',
'verbose_name': 'Observation',
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=500)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name_plural': 'Projects',
'verbose_name': 'Project',
},
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('class_to_load', models.CharField(max_length=30)),
('text', models.TextField()),
('help', models.TextField(blank=True)),
('answer_options', django.contrib.postgres.fields.jsonb.JSONField()),
('required', models.IntegerField(choices=[(0, 'Optional'), (1, 'Required'), (2, 'Required by hierarchy')], default=0)),
('order', models.CharField(blank=True, max_length=10)),
('auto', models.BooleanField(default=False)),
],
options={
'verbose_name_plural': 'Questions',
'verbose_name': 'Question',
},
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('template', django.contrib.postgres.fields.jsonb.JSONField(default=[{'human_name': 'General information', 'name': 'categ0', 'order': 0, 'subcategories': [{'human_name': 'General information', 'name': 'categ0_categ0', 'order': 0, 'question': [], 'questions': []}], 'subcategories_count': 1}])),
('questions', django.contrib.postgres.fields.jsonb.JSONField(default={})),
('opening_date', models.DateField()),
],
options={
'verbose_name_plural': 'Reports',
'verbose_name': 'Report',
},
),
migrations.CreateModel(
name='ReportByProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateField(verbose_name='Opening date')),
('submit_date', models.DateField(verbose_name='Submit date')),
('state', models.SmallIntegerField(choices=[(0, 'Submit pending'), (1, 'Unsubmitted'), (2, 'Aproved'), (3, 'Editing'), (4, 'Canceled'), (5, 'Rejected'), (6, 'In review')], default=0)),
('actions', models.TextField(blank=True, null=True)),
('review_percentage', models.SmallIntegerField(default=0)),
('complete', models.BooleanField(default=False)),
('make_another', models.BooleanField(default=False)),
('created_automatically', models.BooleanField(default=False)),
('creation_date', models.DateField(auto_now=True)),
('additional_info', models.TextField(blank=True, null=True)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Project')),
('report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Report')),
],
options={
'verbose_name_plural': 'Reports by project',
'verbose_name': 'Report by project',
},
),
migrations.CreateModel(
name='ReportType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.TextField()),
('app_name', models.SlugField()),
('name', models.SlugField()),
('action_ok', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='action_ok', to='async_notifications.EmailTemplate')),
('report_end', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_end', to='async_notifications.EmailTemplate')),
('report_start', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_start', to='async_notifications.EmailTemplate')),
('responsable_change', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responsable_change', to='async_notifications.EmailTemplate')),
('revision_turn', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='revision_turn', to='async_notifications.EmailTemplate')),
],
options={
'verbose_name_plural': 'Report types',
'verbose_name': 'Report type',
},
),
migrations.CreateModel(
name='Reviewer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.SmallIntegerField(choices=[(1, 'First'), (2, 'Second'), (3, 'Third'), (4, 'Fourth'), (5, 'Fifth'), (6, 'Sixth'), (7, 'Seventh')])),
('state', models.SmallIntegerField(choices=[(0, 'Unsupported'), (1, 'Supported'), (2, 'In review'), (3, 'Supported by the system'), (4, 'Unsupported by the system')], default=0)),
('active', models.BooleanField(default=True)),
('make_observations', models.BooleanField(default=False)),
('can_ask', models.BooleanField(default=False)),
('can_review', models.BooleanField(default=False)),
('assigned_automatically', models.BooleanField(default=False)),
('report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportByProject')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Reviewers',
'verbose_name': 'Reviewer',
},
),
migrations.CreateModel(
name='RevisionTree',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('assignment_criteria', models.CharField(max_length=100)),
('description', models.TextField()),
('report_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportType')),
],
options={
'verbose_name_plural': 'Revision Tree',
'verbose_name': 'Revision Tree',
},
),
migrations.CreateModel(
name='RevisionTreeUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.SmallIntegerField(choices=[(1, 'First'), (2, 'Second'), (3, 'Third'), (4, 'Fourth'), (5, 'Fifth'), (6, 'Sixth'), (7, 'Seventh')])),
('make_observations', models.BooleanField(default=True)),
('can_ask', models.BooleanField(default=True)),
('can_review', models.BooleanField(default=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Revision Tree Users',
'verbose_name': 'Revision Tree User',
},
),
migrations.AddField(
model_name='revisiontree',
name='revision_tree_user',
field=models.ManyToManyField(to='report_builder.RevisionTreeUser'),
),
migrations.AddField(
model_name='report',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportType'),
),
migrations.AddField(
model_name='question',
name='report',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Report'),
),
migrations.AddField(
model_name='observation',
name='reviewer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Reviewer'),
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.Question'),
),
migrations.AddField(
model_name='answer',
name='report',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='report_builder.ReportByProject'),
),
migrations.AddField(
model_name='answer',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
gpl-3.0
| -3,516,968,302,829,790,000
| 51.24186
| 310
| 0.562639
| false
| 4.390539
| false
| false
| false
|
wevote/WebAppPublic
|
voter/models.py
|
1
|
55344
|
# voter/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import IntegrityError, models
from django.contrib.auth.models import (BaseUserManager, AbstractBaseUser) # PermissionsMixin
from django.core.validators import RegexValidator
from exception.models import handle_exception, handle_record_found_more_than_one_exception,\
handle_record_not_saved_exception
from validate_email import validate_email
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, generate_voter_device_id, get_voter_device_id, \
get_voter_api_device_id, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_last_voter_integer, fetch_site_unique_id_prefix
logger = wevote_functions.admin.get_logger(__name__)
# This way of extending the base user described here:
# https://docs.djangoproject.com/en/1.8/topics/auth/customizing/#a-full-example
# I then altered with this: http://buildthis.com/customizing-djangos-default-user-model/
# class VoterTwitterLink(models.Model):
# voter_id
# twitter_handle
# confirmed_signin_date
# See AUTH_USER_MODEL in config/base.py
class VoterManager(BaseUserManager):
def create_user(self, email=None, username=None, password=None):
"""
Creates and saves a User with the given email and password.
"""
email = self.normalize_email(email)
user = self.model(email=self.normalize_email(email))
# python-social-auth will pass the username and email
if username:
user.fb_username = username
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""
Creates and saves a superuser with the given email and password.
"""
user = self.create_user(email, password=password)
user.is_admin = True
user.save(using=self._db)
return user
def create_voter(self, email=None, password=None):
email = self.normalize_email(email)
email_not_valid = False
password_not_valid = False
voter = Voter()
voter_id = 0
try:
if validate_email(email):
voter.email = email
else:
email_not_valid = True
if password:
voter.set_password(password)
else:
password_not_valid = True
voter.save()
voter_id = voter.id
except IntegrityError as e:
handle_record_not_saved_exception(e, logger=logger)
try:
# Trying to save again will increment the 'we_vote_id_last_voter_integer'
# by calling 'fetch_next_we_vote_id_last_voter_integer'
# TODO We could get into a race condition where multiple creates could be failing at once, so we
# should look more closely at this
voter.save()
voter_id = voter.id
except IntegrityError as e:
handle_record_not_saved_exception(e, logger=logger)
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
results = {
'email_not_valid': email_not_valid,
'password_not_valid': password_not_valid,
'voter_created': True if voter_id > 0 else False,
'voter': voter,
}
return results
def delete_voter(self, email):
email = self.normalize_email(email)
voter_id = 0
voter_we_vote_id = ''
voter_deleted = False
if positive_value_exists(email) and validate_email(email):
email_valid = True
else:
email_valid = False
try:
if email_valid:
results = self.retrieve_voter(voter_id, email, voter_we_vote_id)
if results['voter_found']:
voter = results['voter']
voter_id = voter.id
voter.delete()
voter_deleted = True
except Exception as e:
handle_exception(e, logger=logger)
results = {
'email_not_valid': True if not email_valid else False,
'voter_deleted': voter_deleted,
'voter_id': voter_id,
}
return results
def retrieve_voter_from_voter_device_id(self, voter_device_id):
voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
if not voter_id:
results = {
'voter_found': False,
'voter_id': 0,
'voter': Voter(),
}
return results
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_id(voter_id)
if results['voter_found']:
voter_on_stage = results['voter']
voter_on_stage_found = True
voter_id = results['voter_id']
else:
voter_on_stage = Voter()
voter_on_stage_found = False
voter_id = 0
results = {
'voter_found': voter_on_stage_found,
'voter_id': voter_id,
'voter': voter_on_stage,
}
return results
def fetch_we_vote_id_from_local_id(self, voter_id):
results = self.retrieve_voter_by_id(voter_id)
if results['voter_found']:
voter = results['voter']
return voter.we_vote_id
else:
return None
def fetch_local_id_from_we_vote_id(self, voter_we_vote_id):
results = self.retrieve_voter_by_we_vote_id(voter_we_vote_id)
if results['voter_found']:
voter = results['voter']
return voter.id
else:
return 0
def retrieve_voter_by_id(self, voter_id):
email = ''
voter_we_vote_id = ''
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id)
def retrieve_voter_by_we_vote_id(self, voter_we_vote_id):
voter_id = ''
email = ''
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id)
def retrieve_voter_by_twitter_request_token(self, twitter_request_token):
voter_id = ''
email = ''
voter_we_vote_id = ''
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id, twitter_request_token)
def retrieve_voter_by_facebook_id(self, facebook_id):
voter_id = ''
email = ''
voter_we_vote_id = ''
twitter_request_token = ''
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id, twitter_request_token, facebook_id)
def retrieve_voter_by_twitter_id(self, twitter_id):
voter_id = ''
email = ''
voter_we_vote_id = ''
twitter_request_token = ''
facebook_id = 0
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id, twitter_request_token, facebook_id,
twitter_id)
def retrieve_voter_from_organization_we_vote_id(self, organization_we_vote_id):
voter_id = ''
email = ''
voter_we_vote_id = ''
twitter_request_token = ''
facebook_id = 0
twitter_id = 0
voter_manager = VoterManager()
return voter_manager.retrieve_voter(voter_id, email, voter_we_vote_id, twitter_request_token, facebook_id,
twitter_id, organization_we_vote_id)
def retrieve_voter(self, voter_id, email='', voter_we_vote_id='', twitter_request_token='', facebook_id=0,
twitter_id=0, organization_we_vote_id=''):
voter_id = convert_to_int(voter_id)
if not validate_email(email):
# We do not want to search for an invalid email
email = None
if positive_value_exists(voter_we_vote_id):
voter_we_vote_id = voter_we_vote_id.strip().lower()
if positive_value_exists(organization_we_vote_id):
organization_we_vote_id = organization_we_vote_id.strip().lower()
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
voter_on_stage = Voter()
try:
if positive_value_exists(voter_id):
voter_on_stage = Voter.objects.get(id=voter_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif email != '' and email is not None:
voter_on_stage = Voter.objects.get(
email=email)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(voter_we_vote_id):
voter_on_stage = Voter.objects.get(
we_vote_id=voter_we_vote_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(twitter_request_token):
voter_on_stage = Voter.objects.get(
twitter_request_token=twitter_request_token)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(facebook_id):
voter_on_stage = Voter.objects.get(
facebook_id=facebook_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(twitter_id):
voter_on_stage = Voter.objects.get(
twitter_id=twitter_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
elif positive_value_exists(organization_we_vote_id):
voter_on_stage = Voter.objects.get(
linked_organization_we_vote_id=organization_we_vote_id)
# If still here, we found an existing voter
voter_id = voter_on_stage.id
else:
voter_id = 0
error_result = True
except Voter.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
except Voter.DoesNotExist as e:
error_result = True
exception_does_not_exist = True
results = {
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'voter_found': True if voter_id > 0 else False,
'voter_id': voter_id,
'voter': voter_on_stage,
}
return results
def create_voter_with_voter_device_id(self, voter_device_id):
logger.info("create_voter_with_voter_device_id(voter_device_id)")
def clear_out_abandoned_voter_records(self):
# We will need a method that identifies and deletes abandoned voter records that don't have enough information
# to ever be used
logger.info("clear_out_abandoned_voter_records")
def save_facebook_user_values(self, voter, facebook_id, facebook_email=''):
try:
if facebook_id == 0:
voter.facebook_id = 0
elif positive_value_exists(facebook_id):
voter.facebook_id = facebook_id
if facebook_email == '' or facebook_email is False:
voter.facebook_email = ''
elif positive_value_exists(facebook_email):
voter.facebook_email = facebook_email
voter.save()
success = True
status = "SAVED_VOTER_FACEBOOK_VALUES"
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_FACEBOOK_VALUES"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
results = {
'status': status,
'success': success,
'voter': voter,
}
return results
def save_twitter_user_values(self, voter, twitter_user_object):
try:
# 'id': 132728535,
if positive_value_exists(twitter_user_object.id):
voter.twitter_id = twitter_user_object.id
# 'id_str': '132728535',
# 'utc_offset': 32400,
# 'description': "Cars, Musics, Games, Electronics, toys, food, etc... I'm just a typical boy!",
# 'profile_image_url': 'http://a1.twimg.com/profile_images/1213351752/_2_2__normal.jpg',
if positive_value_exists(twitter_user_object.profile_image_url_https):
voter.twitter_profile_image_url_https = twitter_user_object.profile_image_url_https
# 'profile_background_image_url': 'http://a2.twimg.com/a/1294785484/images/themes/theme15/bg.png',
# 'screen_name': 'jaeeeee',
if positive_value_exists(twitter_user_object.screen_name):
voter.twitter_screen_name = twitter_user_object.screen_name
# 'lang': 'en',
# 'name': 'Jae Jung Chung',
# 'url': 'http://www.carbonize.co.kr',
# 'time_zone': 'Seoul',
voter.save()
success = True
status = "SAVED_VOTER_TWITTER_VALUES"
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_TWITTER_VALUES"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
results = {
'status': status,
'success': success,
'voter': voter,
}
return results
def save_twitter_user_values_from_dict(self, voter, twitter_user_dict):
try:
# 'id': 132728535,
if 'id' in twitter_user_dict:
voter.twitter_id = twitter_user_dict['id']
# 'id_str': '132728535',
# 'utc_offset': 32400,
# 'description': "Cars, Musics, Games, Electronics, toys, food, etc... I'm just a typical boy!",
# 'profile_image_url': 'http://a1.twimg.com/profile_images/1213351752/_2_2__normal.jpg',
if 'profile_image_url_https' in twitter_user_dict:
voter.twitter_profile_image_url_https = twitter_user_dict['profile_image_url_https']
# 'profile_background_image_url': 'http://a2.twimg.com/a/1294785484/images/themes/theme15/bg.png',
# 'screen_name': 'jaeeeee',
if 'screen_name' in twitter_user_dict:
voter.twitter_screen_name = twitter_user_dict['screen_name']
# 'lang': 'en',
# 'name': 'Jae Jung Chung',
# 'url': 'http://www.carbonize.co.kr',
# 'time_zone': 'Seoul',
voter.save()
success = True
status = "SAVED_VOTER_TWITTER_VALUES"
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_TWITTER_VALUES"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
results = {
'status': status,
'success': success,
'voter': voter,
}
return results
def update_voter_photos(self, voter_id, facebook_profile_image_url_https, facebook_photo_variable_exists):
results = self.retrieve_voter(voter_id)
if results['voter_found']:
voter = results['voter']
try:
if facebook_photo_variable_exists:
voter.facebook_profile_image_url_https = facebook_profile_image_url_https
voter.save()
status = "SAVED_VOTER_PHOTOS"
success = True
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_PHOTOS"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
# If here, we were unable to find pre-existing Voter
status = "UNABLE_TO_FIND_VOTER_FOR_UPDATE_VOTER_PHOTOS"
voter = Voter()
success = False
results = {
'status': status,
'success': success,
'voter': voter,
}
return results
def update_voter(self, voter_id, facebook_email, facebook_profile_image_url_https,
first_name, middle_name, last_name,
twitter_profile_image_url_https):
voter_updated = False
results = self.retrieve_voter(voter_id)
if results['voter_found']:
voter = results['voter']
try:
should_save_voter = False
if facebook_email is not False:
voter.facebook_email = facebook_email
should_save_voter = True
if facebook_profile_image_url_https is not False:
voter.facebook_profile_image_url_https = facebook_profile_image_url_https
should_save_voter = True
if first_name is not False:
voter.first_name = first_name
should_save_voter = True
if middle_name is not False:
voter.middle_name = middle_name
should_save_voter = True
if last_name is not False:
voter.last_name = last_name
should_save_voter = True
if twitter_profile_image_url_https is not False:
voter.twitter_profile_image_url_https = twitter_profile_image_url_https
should_save_voter = True
if should_save_voter:
voter.save()
voter_updated = True
status = "UPDATED_VOTER"
success = True
except Exception as e:
status = "UNABLE_TO_UPDATE_VOTER"
success = False
voter_updated = False
else:
# If here, we were unable to find pre-existing Voter
status = "UNABLE_TO_FIND_VOTER_FOR_UPDATE_VOTER"
voter = Voter()
success = False
voter_updated = False
results = {
'status': status,
'success': success,
'voter': voter,
'voter_updated': voter_updated,
}
return results
class Voter(AbstractBaseUser):
"""
A fully featured User model with admin-compliant permissions that uses
a full-length email field as the username.
No fields are required, since at its very simplest, we only need the voter_id based on a voter_device_id.
"""
alphanumeric = RegexValidator(r'^[0-9a-zA-Z]*$', message='Only alphanumeric characters are allowed.')
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our voter info with other
# organizations running the we_vote server
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "voter", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_org_integer
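# For example (illustrative values only): with site_unique_id_prefix "3v" and a next
# integer of 123, the generated identifier would be "wv3vvoter123".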
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=True)
# When a person using an organization's Twitter handle signs in, we create a voter account. This is how
# we link the voter account to the organization.
linked_organization_we_vote_id = models.CharField(
verbose_name="we vote id for linked organization", max_length=255, null=True, blank=True, unique=True)
# Redefine the basic fields that would normally be defined in User
# username = models.CharField(unique=True, max_length=20, validators=[alphanumeric]) # Increase max_length to 255
email = models.EmailField(verbose_name='email address', max_length=255, unique=True, null=True, blank=True)
first_name = models.CharField(verbose_name='first name', max_length=255, null=True, blank=True)
middle_name = models.CharField(max_length=255, null=True, blank=True)
last_name = models.CharField(verbose_name='last name', max_length=255, null=True, blank=True)
date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_verified_volunteer = models.BooleanField(default=False)
# Facebook session information
facebook_id = models.BigIntegerField(verbose_name="facebook big integer id", null=True, blank=True)
facebook_email = models.EmailField(verbose_name='facebook email address', max_length=255, unique=False,
null=True, blank=True)
fb_username = models.CharField(unique=True, max_length=20, validators=[alphanumeric], null=True)
facebook_profile_image_url_https = models.URLField(verbose_name='url of image from facebook', blank=True, null=True)
# Twitter session information
twitter_id = models.BigIntegerField(verbose_name="twitter big integer id", null=True, blank=True)
twitter_screen_name = models.CharField(verbose_name='twitter screen name / handle',
max_length=255, null=True, unique=False)
twitter_profile_image_url_https = models.URLField(verbose_name='url of logo from twitter', blank=True, null=True)
twitter_request_token = models.TextField(verbose_name='twitter request token', null=True, blank=True)
twitter_request_secret = models.TextField(verbose_name='twitter request secret', null=True, blank=True)
twitter_access_token = models.TextField(verbose_name='twitter access token', null=True, blank=True)
twitter_access_secret = models.TextField(verbose_name='twitter access secret', null=True, blank=True)
twitter_connection_active = models.BooleanField(default=False)
# Custom We Vote fields
# image_displayed
# image_twitter
# image_facebook
# blocked
# flags (ex/ signed_in)
# password_hashed
# password_reset_key
# password_reset_request_time
# last_activity
# The unique ID of the election this voter is currently looking at. (Provided by Google Civic)
# DALE 2015-10-29 We are replacing this with looking up the value in the ballot_items table, and then
# storing in cookie
# current_google_civic_election_id = models.PositiveIntegerField(
# verbose_name="google civic election id", null=True, unique=False)
objects = VoterManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = [] # Since we need to store a voter based solely on voter_device_id, no values are required
# We override the save function to allow for the email field to be empty. If NOT empty, email must be unique.
# We also want to auto-generate we_vote_id
def save(self, *args, **kwargs):
if self.email:
self.email = self.email.lower().strip() # Hopefully reduces junk to ""
if self.email != "": # If it's not blank
if not validate_email(self.email): # ...make sure it is a valid email
# If it isn't a valid email, don't save the value as an email -- just save a blank field
self.email = None
if self.email == "":
self.email = None
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_last_voter_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "voter" = tells us this is a unique id for an org
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}voter{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
# TODO we need to deal with the situation where we_vote_id is NOT unique on save
super(Voter, self).save(*args, **kwargs)
def get_full_name(self):
full_name = self.first_name if positive_value_exists(self.first_name) else ''
full_name += " " if positive_value_exists(self.first_name) and positive_value_exists(self.last_name) else ''
full_name += self.last_name if positive_value_exists(self.last_name) else ''
return full_name
def get_short_name(self):
# return self.first_name
# The user is identified by their email address
return self.email
def voter_can_retrieve_account(self):
if positive_value_exists(self.email):
return True
else:
return False
def __str__(self): # __unicode__ on Python 2
# return self.get_full_name(self)
return str(self.email)
def has_perm(self, perm, obj=None):
"""
Does the user have a specific permission?
"""
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"""
Does the user have permissions to view the app `app_label`?
"""
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"""
Is the user a member of staff?
"""
# Simplest possible answer: All admins are staff
return self.is_admin
def voter_photo_url(self):
if self.facebook_profile_image_url_https:
return self.facebook_profile_image_url_https
elif self.twitter_profile_image_url_https:
return self.twitter_profile_image_url_https
return ''
def signed_in_personal(self):
if positive_value_exists(self.email) or self.signed_in_facebook() or self.signed_in_twitter():
# or positive_value_exists(self.is_authenticated()):
return True
return False
def signed_in_facebook(self):
if positive_value_exists(self.facebook_id):
return True
return False
def signed_in_google(self):
return False
def signed_in_twitter(self):
if positive_value_exists(self.twitter_access_token):
return True
return False
class VoterDeviceLink(models.Model):
"""
There can be many voter_device_id's for every voter_id. (See commentary in class VoterDeviceLinkManager)
"""
# The id for this object is not used in any searches
# A randomly generated identifier that gets stored as a cookie on a single device
# See wevote_functions.functions, function generate_voter_device_id for a discussion of voter_device_id length
voter_device_id = models.CharField(verbose_name='voter device id',
max_length=255, null=False, blank=False, unique=True)
# The voter_id associated with voter_device_id
voter_id = models.BigIntegerField(verbose_name="voter unique identifier", null=False, blank=False, unique=False)
# The unique ID of the election (provided by Google Civic) that the voter is looking at on this device
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False)
def generate_voter_device_id(self):
# A simple mapping to this function
return generate_voter_device_id()
class VoterDeviceLinkManager(models.Model):
"""
In order to start gathering information about a voter prior to authentication, we use a long randomized string
stored as a browser cookie. As soon as we get any other identifiable information from a voter (like an email
address), we capture that so the Voter record can be portable among devices. Note that any voter might be using
We Vote from different browsers. The VoterDeviceLink links one or more voter_device_id's to one voter_id.
Since (prior to authentication) every voter_device_id will have its own voter_id record, we merge and delete Voter
records whenever we can.
"""
def __str__(self): # __unicode__ on Python 2
return "Voter Device Id Manager"
def delete_all_voter_device_links(self, voter_device_id):
voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
try:
if positive_value_exists(voter_id):
VoterDeviceLink.objects.filter(voter_id=voter_id).delete()
status = "DELETE_ALL_VOTER_DEVICE_LINKS_SUCCESSFUL"
success = True
else:
status = "DELETE_ALL_VOTER_DEVICE_LINKS-MISSING_VARIABLES"
success = False
except Exception as e:
status = "DELETE_ALL_VOTER_DEVICE_LINKS-DATABASE_DELETE_EXCEPTION"
success = False
results = {
'success': success,
'status': status,
}
return results
def delete_voter_device_link(self, voter_device_id):
try:
if positive_value_exists(voter_device_id):
VoterDeviceLink.objects.filter(voter_device_id=voter_device_id).delete()
status = "DELETE_VOTER_DEVICE_LINK_SUCCESSFUL"
success = True
else:
status = "DELETE_VOTER_DEVICE_LINK-MISSING_VARIABLES"
success = False
except Exception as e:
status = "DELETE_VOTER_DEVICE_LINK-DATABASE_DELETE_EXCEPTION"
success = False
results = {
'success': success,
'status': status,
}
return results
def retrieve_voter_device_link_from_voter_device_id(self, voter_device_id):
voter_id = 0
voter_device_link_id = 0
voter_device_link_manager = VoterDeviceLinkManager()
results = voter_device_link_manager.retrieve_voter_device_link(voter_device_id, voter_id, voter_device_link_id)
return results
def retrieve_voter_device_link(self, voter_device_id, voter_id=0, voter_device_link_id=0):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
voter_device_link_on_stage = VoterDeviceLink()
try:
if positive_value_exists(voter_device_id):
voter_device_link_on_stage = VoterDeviceLink.objects.get(voter_device_id=voter_device_id)
voter_device_link_id = voter_device_link_on_stage.id
elif positive_value_exists(voter_id):
voter_device_link_on_stage = VoterDeviceLink.objects.get(voter_id=voter_id)
                # If still here, we found an existing voter_device_link
voter_device_link_id = voter_device_link_on_stage.id
elif positive_value_exists(voter_device_link_id):
voter_device_link_on_stage = VoterDeviceLink.objects.get(id=voter_device_link_id)
                # If still here, we found an existing voter_device_link
voter_device_link_id = voter_device_link_on_stage.id
else:
voter_device_link_id = 0
except VoterDeviceLink.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
except VoterDeviceLink.DoesNotExist:
error_result = True
exception_does_not_exist = True
results = {
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'voter_device_link_found': True if voter_device_link_id > 0 else False,
'voter_device_link': voter_device_link_on_stage,
}
return results
def save_new_voter_device_link(self, voter_device_id, voter_id):
error_result = False
exception_record_not_saved = False
missing_required_variables = False
voter_device_link_on_stage = VoterDeviceLink()
voter_device_link_id = 0
try:
if positive_value_exists(voter_device_id) and positive_value_exists(voter_id):
voter_device_link_on_stage.voter_device_id = voter_device_id
voter_device_link_on_stage.voter_id = voter_id
voter_device_link_on_stage.save()
voter_device_link_id = voter_device_link_on_stage.id
else:
missing_required_variables = True
voter_device_link_id = 0
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
error_result = True
exception_record_not_saved = True
results = {
'error_result': error_result,
'missing_required_variables': missing_required_variables,
'RecordNotSaved': exception_record_not_saved,
'voter_device_link_created': True if voter_device_link_id > 0 else False,
'voter_device_link': voter_device_link_on_stage,
}
return results
def update_voter_device_link_with_election_id(self, voter_device_link, google_civic_election_id):
voter_object = None
return self.update_voter_device_link(voter_device_link, voter_object, google_civic_election_id)
def update_voter_device_link(self, voter_device_link, voter_object=None, google_civic_election_id=0):
"""
Update existing voter_device_link with a new voter_id or google_civic_election_id
"""
error_result = False
exception_record_not_saved = False
missing_required_variables = False
voter_device_link_id = 0
try:
if positive_value_exists(voter_device_link.voter_device_id):
if voter_object and positive_value_exists(voter_object.id):
voter_device_link.voter_id = voter_object.id
if positive_value_exists(google_civic_election_id):
voter_device_link.google_civic_election_id = google_civic_election_id
elif google_civic_election_id == 0:
# If set literally to 0, save it
voter_device_link.google_civic_election_id = 0
voter_device_link.save()
voter_device_link_id = voter_device_link.id
else:
missing_required_variables = True
voter_device_link_id = 0
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
error_result = True
exception_record_not_saved = True
results = {
'error_result': error_result,
'missing_required_variables': missing_required_variables,
'RecordNotSaved': exception_record_not_saved,
'voter_device_link_updated': True if voter_device_link_id > 0 else False,
'voter_device_link': voter_device_link,
}
return results
# This method *just* returns the voter_id or 0
def fetch_voter_id_from_voter_device_link(voter_device_id):
voter_device_link_manager = VoterDeviceLinkManager()
results = voter_device_link_manager.retrieve_voter_device_link_from_voter_device_id(voter_device_id)
if results['voter_device_link_found']:
voter_device_link = results['voter_device_link']
return voter_device_link.voter_id
return 0
# This method *just* returns the voter_id or 0
def fetch_voter_id_from_voter_we_vote_id(we_vote_id):
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_we_vote_id(we_vote_id)
if results['voter_found']:
voter = results['voter']
return voter.id
return 0
# This method *just* returns the voter_we_vote_id or ""
def fetch_voter_we_vote_id_from_voter_id(voter_id):
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_id(voter_id)
if results['voter_found']:
voter = results['voter']
return voter.we_vote_id
return ""
def retrieve_voter_authority(request):
voter_api_device_id = get_voter_api_device_id(request)
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_from_voter_device_id(voter_api_device_id)
if results['voter_found']:
voter = results['voter']
authority_results = {
'voter_found': True,
'is_active': positive_value_exists(voter.is_active),
'is_admin': positive_value_exists(voter.is_admin),
'is_verified_volunteer': positive_value_exists(voter.is_verified_volunteer),
}
return authority_results
authority_results = {
'voter_found': False,
'is_active': False,
'is_admin': False,
'is_verified_volunteer': False,
}
return authority_results
def voter_has_authority(request, authority_required, authority_results=None):
if not authority_results:
authority_results = retrieve_voter_authority(request)
if not positive_value_exists(authority_results['is_active']):
return False
if 'admin' in authority_required:
if positive_value_exists(authority_results['is_admin']):
return True
if 'verified_volunteer' in authority_required:
if positive_value_exists(authority_results['is_verified_volunteer']) or \
positive_value_exists(authority_results['is_admin']):
return True
return False
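# Illustrative usage (hypothetical view code, not part of this module): an admin-only page
# could guard itself with something like
#     if not voter_has_authority(request, 'admin'):
#         return HttpResponseForbidden()
# The `in` checks above accept either a plain string or a list of authority names.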
# class VoterJurisdictionLink(models.Model):
# """
# All of the jurisdictions the Voter is in
# """
# voter = models.ForeignKey(Voter, null=False, blank=False, verbose_name='voter')
# jurisdiction = models.ForeignKey(Jurisdiction,
# null=False, blank=False, verbose_name="jurisdiction this voter votes in")
BALLOT_ADDRESS = 'B'
MAILING_ADDRESS = 'M'
FORMER_BALLOT_ADDRESS = 'F'
ADDRESS_TYPE_CHOICES = (
(BALLOT_ADDRESS, 'Address Where Registered to Vote'),
(MAILING_ADDRESS, 'Mailing Address'),
(FORMER_BALLOT_ADDRESS, 'Prior Address'),
)
class VoterAddress(models.Model):
"""
An address of a registered voter for ballot purposes.
"""
#
# We are relying on built-in Python id field
# The voter_id that owns this address
voter_id = models.BigIntegerField(verbose_name="voter unique identifier", null=False, blank=False, unique=False)
address_type = models.CharField(
verbose_name="type of address", max_length=1, choices=ADDRESS_TYPE_CHOICES, default=BALLOT_ADDRESS)
text_for_map_search = models.CharField(max_length=255, blank=False, null=False, verbose_name='address as entered')
latitude = models.CharField(max_length=255, blank=True, null=True, verbose_name='latitude returned from Google')
longitude = models.CharField(max_length=255, blank=True, null=True, verbose_name='longitude returned from Google')
normalized_line1 = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized address line 1 returned from Google')
normalized_line2 = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized address line 2 returned from Google')
normalized_city = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized city returned from Google')
normalized_state = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized state returned from Google')
normalized_zip = models.CharField(max_length=255, blank=True, null=True,
verbose_name='normalized zip returned from Google')
# This is the election_id last found for this address
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id for this address", null=True, unique=False)
# The last election day this address was used to retrieve a ballot
election_day_text = models.CharField(verbose_name="election day", max_length=255, null=True, blank=True)
refreshed_from_google = models.BooleanField(
verbose_name="have normalized fields been updated from Google since address change?", default=False)
class VoterAddressManager(models.Model):
def __unicode__(self):
return "VoterAddressManager"
def retrieve_ballot_address_from_voter_id(self, voter_id):
voter_address_id = 0
address_type = BALLOT_ADDRESS
voter_address_manager = VoterAddressManager()
return voter_address_manager.retrieve_address(voter_address_id, voter_id, address_type)
def retrieve_ballot_map_text_from_voter_id(self, voter_id):
results = self.retrieve_ballot_address_from_voter_id(voter_id)
ballot_map_text = ''
if results['voter_address_found']:
voter_address = results['voter_address']
minimum_normalized_address_data_exists = positive_value_exists(
voter_address.normalized_city) or positive_value_exists(
voter_address.normalized_state) or positive_value_exists(voter_address.normalized_zip)
if minimum_normalized_address_data_exists:
ballot_map_text += voter_address.normalized_line1 \
if positive_value_exists(voter_address.normalized_line1) else ''
ballot_map_text += ", " \
if positive_value_exists(voter_address.normalized_line1) \
and positive_value_exists(voter_address.normalized_city) \
else ''
ballot_map_text += voter_address.normalized_city \
if positive_value_exists(voter_address.normalized_city) else ''
ballot_map_text += ", " \
if positive_value_exists(voter_address.normalized_city) \
and positive_value_exists(voter_address.normalized_state) \
else ''
ballot_map_text += voter_address.normalized_state \
if positive_value_exists(voter_address.normalized_state) else ''
ballot_map_text += " " + voter_address.normalized_zip \
if positive_value_exists(voter_address.normalized_zip) else ''
elif positive_value_exists(voter_address.text_for_map_search):
ballot_map_text += voter_address.text_for_map_search
return ballot_map_text
def retrieve_address(self, voter_address_id, voter_id=0, address_type=''):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
voter_address_on_stage = VoterAddress()
voter_address_has_value = False
if not positive_value_exists(address_type):
# Provide a default
address_type = BALLOT_ADDRESS
try:
if positive_value_exists(voter_address_id):
voter_address_on_stage = VoterAddress.objects.get(id=voter_address_id)
voter_address_id = voter_address_on_stage.id
voter_address_found = True
status = "VOTER_ADDRESS_FOUND_BY_ID"
success = True
voter_address_has_value = True if positive_value_exists(voter_address_on_stage.text_for_map_search) \
else False
elif positive_value_exists(voter_id) and address_type in (BALLOT_ADDRESS, MAILING_ADDRESS,
FORMER_BALLOT_ADDRESS):
voter_address_on_stage = VoterAddress.objects.get(voter_id=voter_id, address_type=address_type)
# If still here, we found an existing address
voter_address_id = voter_address_on_stage.id
voter_address_found = True
status = "VOTER_ADDRESS_FOUND_BY_VOTER_ID_AND_ADDRESS_TYPE"
success = True
voter_address_has_value = True if positive_value_exists(voter_address_on_stage.text_for_map_search) \
else False
else:
voter_address_found = False
status = "VOTER_ADDRESS_NOT_FOUND-MISSING_REQUIRED_VARIABLES"
success = False
except VoterAddress.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
status = "VOTER_ADDRESS_MULTIPLE_OBJECTS_RETURNED"
exception_multiple_object_returned = True
success = False
voter_address_found = False
except VoterAddress.DoesNotExist:
error_result = True
status = "VOTER_ADDRESS_DOES_NOT_EXIST"
exception_does_not_exist = True
success = True
voter_address_found = False
results = {
'success': success,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'voter_address_found': voter_address_found,
'voter_address_has_value': voter_address_has_value,
'voter_address_id': voter_address_id,
'voter_address': voter_address_on_stage,
}
return results
# # TODO TEST THIS
# def retrieve_addresses(self, voter_id):
# error_result = False
# exception_does_not_exist = False
# # voter_addresses_on_stage = # How to typecast?
# number_of_addresses = 0
#
# try:
# if voter_id > 0:
# voter_addresses_on_stage = VoterAddress.objects.get(voter_id=voter_id)
# number_of_addresses = len(voter_addresses_on_stage)
# except VoterAddress.DoesNotExist:
# error_result = True
# exception_does_not_exist = True
#
# results = {
# 'error_result': error_result,
# 'DoesNotExist': exception_does_not_exist,
# 'voter_addresses_found': True if number_of_addresses > 0 else False,
# 'voter_addresses_on_stage': voter_addresses_on_stage,
# 'number_of_addresses': number_of_addresses,
# }
# return results
def update_or_create_voter_address(self, voter_id, address_type, raw_address_text):
"""
NOTE: This approach won't support multiple FORMER_BALLOT_ADDRESS
:param voter_id:
:param address_type:
:param raw_address_text:
:return:
"""
status = ''
exception_multiple_object_returned = False
new_address_created = False
voter_address_on_stage = None
voter_address_on_stage_found = False
if positive_value_exists(voter_id) and address_type in (BALLOT_ADDRESS, MAILING_ADDRESS, FORMER_BALLOT_ADDRESS):
try:
updated_values = {
# Values we search against
'voter_id': voter_id,
'address_type': address_type,
# The rest of the values are to be saved
'text_for_map_search': raw_address_text,
'latitude': None,
'longitude': None,
'normalized_line1': None,
'normalized_line2': None,
'normalized_city': None,
'normalized_state': None,
'normalized_zip': None,
# We clear out former values for these so voter_ballot_items_retrieve_for_api resets them
'refreshed_from_google': False,
'google_civic_election_id': 0,
'election_day_text': '',
}
voter_address_on_stage, new_address_created = VoterAddress.objects.update_or_create(
voter_id__exact=voter_id, address_type=address_type, defaults=updated_values)
voter_address_on_stage_found = voter_address_on_stage.id
success = True
except VoterAddress.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
success = False
status = 'MULTIPLE_MATCHING_ADDRESSES_FOUND'
exception_multiple_object_returned = True
else:
success = False
status = 'MISSING_VOTER_ID_OR_ADDRESS_TYPE'
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'voter_address_saved': success,
'address_type': address_type,
'new_address_created': new_address_created,
'voter_address_found': voter_address_on_stage_found,
'voter_address': voter_address_on_stage,
}
return results
def update_voter_address_with_normalized_values(self, voter_id, voter_address_dict):
voter_address_id = 0
address_type = BALLOT_ADDRESS
results = self.retrieve_address(voter_address_id, voter_id, address_type)
if results['success']:
voter_address = results['voter_address']
try:
voter_address.normalized_line1 = voter_address_dict['line1']
voter_address.normalized_city = voter_address_dict['city']
voter_address.normalized_state = voter_address_dict['state']
voter_address.normalized_zip = voter_address_dict['zip']
voter_address.refreshed_from_google = True
voter_address.save()
status = "SAVED_VOTER_ADDRESS_WITH_NORMALIZED_VALUES"
success = True
except Exception as e:
status = "UNABLE_TO_SAVE_VOTER_ADDRESS_WITH_NORMALIZED_VALUES"
success = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
# If here, we were unable to find pre-existing VoterAddress
status = "UNABLE_TO_FIND_VOTER_ADDRESS"
voter_address = VoterAddress() # TODO Finish this for "create new" case
success = False
results = {
'status': status,
'success': success,
'voter_address': voter_address,
}
return results
def update_existing_voter_address_object(self, voter_address_object):
results = self.retrieve_address(voter_address_object.id)
if results['success']:
try:
voter_address_object.save() # Save the incoming object
status = "UPDATED_EXISTING_VOTER_ADDRESS"
success = True
voter_address_found = True
except Exception as e:
status = "UNABLE_TO_UPDATE_EXISTING_VOTER_ADDRESS"
success = False
voter_address_found = False
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
# If here, we were unable to find pre-existing VoterAddress
status = "UNABLE_TO_FIND_AND_UPDATE_VOTER_ADDRESS"
voter_address_object = None
success = False
voter_address_found = False
results = {
'status': status,
'success': success,
'voter_address': voter_address_object,
'voter_address_found': voter_address_found,
}
return results
def voter_setup(request):
"""
This is only used for sign in on the API server, and is not used for WebApp
:param request:
:return:
"""
generate_voter_api_device_id_if_needed = True
voter_api_device_id = get_voter_api_device_id(request, generate_voter_api_device_id_if_needed)
voter_id = 0
voter_id_found = False
store_new_voter_api_device_id_in_cookie = True
voter_device_link_manager = VoterDeviceLinkManager()
results = voter_device_link_manager.retrieve_voter_device_link_from_voter_device_id(voter_api_device_id)
if results['voter_device_link_found']:
voter_device_link = results['voter_device_link']
voter_id = voter_device_link.voter_id
voter_id_found = True if positive_value_exists(voter_id) else False
store_new_voter_api_device_id_in_cookie = False if positive_value_exists(voter_id_found) else True
# If existing voter not found, create a new voter
if not voter_id_found:
# Create a new voter and return the id
voter_manager = VoterManager()
results = voter_manager.create_voter()
if results['voter_created']:
voter = results['voter']
voter_id = voter.id
# Now save the voter_device_link
results = voter_device_link_manager.save_new_voter_device_link(voter_api_device_id, voter_id)
if results['voter_device_link_created']:
voter_device_link = results['voter_device_link']
voter_id = voter_device_link.voter_id
voter_id_found = True if voter_id > 0 else False
store_new_voter_api_device_id_in_cookie = True
else:
voter_id = 0
voter_id_found = False
final_results = {
'voter_id': voter_id,
'voter_api_device_id': voter_api_device_id,
'voter_id_found': voter_id_found,
'store_new_voter_api_device_id_in_cookie': store_new_voter_api_device_id_in_cookie,
}
return final_results
|
bsd-3-clause
| -5,296,734,359,636,039,000
| 42.612293
| 120
| 0.59179
| false
| 3.859953
| false
| false
| false
|
matslindh/kimochi
|
alembic/versions/8f5b2066cbac_add_self_referential_image_reference.py
|
1
|
2160
|
"""Add self-referential image reference
Revision ID: 8f5b2066cbac
Revises: 698cc06661d6
Create Date: 2016-03-20 19:35:31.321144
"""
# revision identifiers, used by Alembic.
revision = '8f5b2066cbac'
down_revision = '698cc06661d6'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('images', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_images_gallery_id'), ['gallery_id'], unique=False)
batch_op.create_index(batch_op.f('ix_images_site_id'), ['site_id'], unique=False)
batch_op.create_foreign_key('ix_images_parent_image_id', 'images', ['parent_image_id'], ['id'])
batch_op.create_foreign_key('ix_sites_site_id', 'sites', ['site_id'], ['id'])
batch_op.create_foreign_key('ix_galleries_gallery_id', 'galleries', ['gallery_id'], ['id'])
with op.batch_alter_table('pages_sections_layout_settings', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_pages_sections_layout_settings_page_section_id'), ['page_section_id'], unique=False)
with op.batch_alter_table('sites_settings', schema=None) as batch_op:
batch_op.drop_index('ix_sites_settings_site_id')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('sites_settings', schema=None) as batch_op:
batch_op.create_index('ix_sites_settings_site_id', ['site_id'], unique=False)
with op.batch_alter_table('pages_sections_layout_settings', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_pages_sections_layout_settings_page_section_id'))
with op.batch_alter_table('images', schema=None) as batch_op:
        # drop the foreign keys created in upgrade() by name (autogenerate left these as None)
        batch_op.drop_constraint('ix_galleries_gallery_id', type_='foreignkey')
        batch_op.drop_constraint('ix_sites_site_id', type_='foreignkey')
        batch_op.drop_constraint('ix_images_parent_image_id', type_='foreignkey')
batch_op.drop_index(batch_op.f('ix_images_site_id'))
batch_op.drop_index(batch_op.f('ix_images_gallery_id'))
### end Alembic commands ###
|
mit
| 7,596,053,783,737,711,000
| 40.538462
| 129
| 0.678241
| false
| 3.167155
| false
| false
| false
|
akalipetis/raven-python
|
raven/_compat.py
|
1
|
5038
|
"""Utilities for writing code that runs on Python 2 and 3"""
# flake8: noqa
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.3.0"
PY2 = sys.version_info[0] == 2
if not PY2:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
if not PY2:
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
_iterlists = "lists"
else:
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
_iterlists = "iterlists"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
def iterkeys(d, **kw):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)(**kw))
def itervalues(d, **kw):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw):
"""Return an iterator over the (key, [values]) pairs of a dictionary."""
return iter(getattr(d, _iterlists)(**kw))
if not PY2:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s): # NOQA
return s
def u(s): # NOQA
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
if not PY2:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
del builtins
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
def get_code(func):
rv = getattr(func, '__code__', getattr(func, 'func_code', None))
if rv is None:
raise TypeError('Could not get code from %r' % type(func).__name__)
return rv
|
bsd-3-clause
| -442,859,743,706,172,900
| 26.380435
| 82
| 0.618499
| false
| 3.79082
| false
| false
| false
|
moto-timo/robotframework
|
utest/output/test_filelogger.py
|
1
|
1867
|
import unittest
import time
from robot.output.filelogger import FileLogger
from robot.utils import StringIO, robottime
from robot.utils.asserts import *
from robot.utils.robottime import TimestampCache
class _FakeTimeCache(TimestampCache):
def __init__(self):
self.fake = time.mktime((2006, 6, 13, 8, 37, 42, 0, 0, 1)) + 0.123
TimestampCache.__init__(self)
def _get_epoch(self):
return self.fake
class TestFileLogger(unittest.TestCase):
def setUp(self):
robottime.TIMESTAMP_CACHE = _FakeTimeCache()
FileLogger._get_writer = lambda *args: StringIO()
self.logger = FileLogger('whatever', 'INFO')
def tearDown(self):
robottime.TIMESTAMP_CACHE = TimestampCache()
def test_write(self):
self.logger.write('my message', 'INFO')
expected = '20060613 08:37:42.123 | INFO | my message\n'
self._verify_message(expected)
self.logger.write('my 2nd msg\nwith 2 lines', 'ERROR')
expected += '20060613 08:37:42.123 | ERROR | my 2nd msg\nwith 2 lines\n'
self._verify_message(expected)
def test_write_helpers(self):
self.logger.info('my message')
expected = '20060613 08:37:42.123 | INFO | my message\n'
self._verify_message(expected)
self.logger.warn('my 2nd msg\nwith 2 lines')
expected += '20060613 08:37:42.123 | WARN | my 2nd msg\nwith 2 lines\n'
self._verify_message(expected)
def test_set_level(self):
self.logger.write('msg', 'DEBUG')
self._verify_message('')
self.logger.set_level('DEBUG')
self.logger.write('msg', 'DEBUG')
self._verify_message('20060613 08:37:42.123 | DEBUG | msg\n')
def _verify_message(self, expected):
assert_equals(self.logger._writer.getvalue(), expected)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| 1,760,009,257,251,679,000
| 31.189655
| 80
| 0.636315
| false
| 3.516008
| true
| false
| false
|
loleg/realms-wiki
|
realms/lib/util.py
|
1
|
3701
|
import click
import re
import os
import hashlib
import json
import string
import random
import sys
from jinja2 import Template
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def random_string(size=6, chars=string.ascii_lowercase + string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def to_json(data):
return json.dumps(to_dict(data), separators=(',', ':'))
def to_dict(data):
if not data:
return AttrDict()
def row2dict(row):
d = AttrDict()
for column in row.__table__.columns:
d[column.name] = getattr(row, column.name)
return d
if isinstance(data, list):
return [row2dict(x) for x in data]
else:
return row2dict(data)
def mkdir_safe(path):
if path and not(os.path.exists(path)):
os.makedirs(path)
return path
def extract_path(file_path):
if not file_path:
return None
last_slash = file_path.rindex("/")
if last_slash:
        return file_path[0:last_slash]
def clean_path(path):
if path:
if path[0] != '/':
            path = '/' + path
return re.sub(r"//+", '/', path)
def extract_name(file_path):
if file_path[-1] == "/":
return None
return os.path.basename(file_path)
def remove_ext(path):
return os.path.splitext(path)[0]
def clean_url(url):
if not url:
return url
url = url.replace('%2F', '/')
url = re.sub(r"^/+", "", url)
return re.sub(r"//+", '/', url)
def to_canonical(s):
"""
    Whitespace runs -> single dash
Double dash -> single dash
Remove .md file extension
Remove all non alphanumeric and dash
Limit to first 64 chars
"""
s = s.encode('ascii', 'ignore')
s = str(s)
s = re.sub(r"\s\s*", "-", s)
s = re.sub(r"\-\-+", "-", s)
s = re.sub(r"\.md$", "", s)
s = re.sub(r"[^a-zA-Z0-9\-]", "", s)
s = s[:64]
return s
def cname_to_filename(cname):
""" Convert canonical name to filename
:param cname: Canonical name
:return: str -- Filename
"""
return cname + ".md"
def filename_to_cname(filename):
"""Convert filename to canonical name.
.. note::
        It's assumed the filename is already in canonical format
"""
return os.path.splitext(filename)[0]
def gravatar_url(email):
return "https://www.gravatar.com/avatar/" + hashlib.md5(email).hexdigest()
def in_virtualenv():
return hasattr(sys, 'real_prefix')
def in_vagrant():
return os.path.isdir("/vagrant")
def is_su():
return os.geteuid() == 0
def green(s):
click.secho(s, fg='green')
def yellow(s):
click.secho(s, fg='yellow')
def red(s):
click.secho(s, fg='red')
def upstart_script(user='root', app_dir=None, port=5000, workers=2, path=None):
script = """
limit nofile 65335 65335
respawn
description "Realms Wiki"
author "scragg@gmail.com"
chdir {{ app_dir }}
{% if path %}
env PATH={{ path }}:/usr/local/bin:/usr/bin:/bin:$PATH
export PATH
{% endif %}
env LC_ALL=en_US.UTF-8
env GEVENT_RESOLVER=ares
export LC_ALL
export GEVENT_RESOLVER
setuid {{ user }}
setgid {{ user }}
start on runlevel [2345]
stop on runlevel [!2345]
respawn
exec gunicorn \
--name realms-wiki \
--access-logfile - \
--error-logfile - \
--worker-class gevent \
--workers {{ workers }} \
--bind 0.0.0.0:{{ port }} \
--user {{ user }} \
--group {{ user }} \
--chdir {{ app_dir }} \
'realms:create_app()'
"""
template = Template(script)
return template.render(user=user, app_dir=app_dir, port=port, workers=workers, path=path)
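# Illustrative usage (all values are placeholders): render an upstart job for a dedicated
# service user, e.g.
#     script = upstart_script(user='realms', app_dir='/home/realms/realms-wiki', port=5000, workers=4)
# and write the result to /etc/init/realms-wiki.conf on an upstart-based system.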
|
gpl-2.0
| -2,907,286,904,536,917,000
| 17.979487
| 97
| 0.597406
| false
| 3.160547
| false
| false
| false
|
zekroTJA/regiusBot
|
commands/cmd_mute.py
|
1
|
2707
|
import discord
from os import path, makedirs, remove
import STATICS
ROLE_NAME = "Supporter"
perm = 2
description = "Mute members on guild in chat"
def get_mutes(server):
if not path.isdir("SAVES/" + server.id):
makedirs("SAVES/" + server.id)
if path.isfile("SAVES/" + server.id + "/mutes"):
with open("SAVES/" + server.id + "/mutes") as f:
return [line.replace("\n", "") for line in f.readlines()]
else:
return []
def add_mute(member, server):
mutelist = get_mutes(server)
mutelist.append(member.id)
try:
remove("SAVES/" + server.id + "/mutes")
except:
pass
with open("SAVES/" + server.id + "/mutes", "w") as fw:
[(lambda x: fw.write(x + "\n"))(line) for line in mutelist]
def rem_mute(member, server):
mutelist = get_mutes(server)
mutelist.remove(member.id)
try:
remove("SAVES/" + server.id + "/mutes")
except:
pass
with open("SAVES/" + server.id + "/mutes", "w") as fw:
        [(lambda x: fw.write(x + "\n"))(line) for line in mutelist]
def get_member(id, server):
return discord.utils.get(server.members, id=id)
async def check_mute(message, client):
if not message.channel.is_private:
if get_mutes(message.server).__contains__(message.author.id):
await client.send_message(message.author, embed=discord.Embed(color=discord.Color.red(), description="Sorry, but you got muted on this server! Contact a supporter to get unmuted."))
await client.delete_message(message)
async def ex(message, client):
if message.content.replace(STATICS.PREFIX + "mute ", "") == "list":
muted_str = "\n".join([get_member(line, message.server).name for line in get_mutes(message.server)]) if len(get_mutes(message.server)) > 0 else "no one"
await client.send_message(message.channel, embed=discord.Embed(description="**MUTED MEMBERS\n\n**" + muted_str))
elif len(message.mentions) < 1:
await client.send_message(message.channel, embed=discord.Embed(color=discord.Color.red(), description="Please mention the user you want to mute!"))
elif get_mutes(message.server).__contains__(message.mentions[0].id):
rem_mute(message.mentions[0], message.server)
await client.send_message(message.channel, embed=discord.Embed(color=discord.Color.green(), description=("%s got unmuted by %s." % (message.mentions[0].mention, message.author.mention))))
else:
add_mute(message.mentions[0], message.server)
await client.send_message(message.channel, embed=discord.Embed(color=discord.Color.orange(), description=("%s got muted by %s." % (message.mentions[0].mention, message.author.mention))))
|
mit
| 6,176,478,198,408,302,000
| 39.402985
| 195
| 0.655707
| false
| 3.253606
| false
| false
| false
|
totalgood/nlpia
|
src/nlpia/scripts/lsa_tweets.py
|
1
|
3415
|
import os
import gc
import json
import numpy as np
import gzip
from gensim.models import TfidfModel, LsiModel
from gensim.corpora import Dictionary
from nlpia.data.loaders import BIGDATA_PATH, read_csv
KEEP_N = 300000 # max vocab size
NO_BELOW = 5 # min DF (count)
NO_ABOVE = .7 # max DF (fraction)
def lsa_twitter(cased_tokens=None):
""" Latent Sentiment Analyis on random sampling of twitter search results for words listed in cased_tokens """
# Only 5 of these tokens are saved for a no_below=2 filter:
# PyCons NLPS #PyCon2016 #NaturalLanguageProcessing #naturallanguageprocessing
if cased_tokens is None:
cased_tokens = ('PyConOpenSpaces PyCon PyCon2017 PyCon2018 PyCon2016 PyCon2015 OpenSpace PyconTutorial ' +
'NLP NaturalLanguageProcessing NLPInAction NaturalLanguageProcessingInAction NLPIA Twote Twip'
).split()
cased_tokens += [s + 's' for s in cased_tokens]
cased_tokens += 'TotalGood TotalGoods HobsonLane Hob Hobs TotalGood.com ' \
'www.TotalGood.com http://www.TotalGood.com https://www.TotalGood.com'.split()
allcase_tokens = cased_tokens + [s.lower() for s in cased_tokens]
allcase_tokens += [s.title() for s in cased_tokens]
allcase_tokens += [s.upper() for s in cased_tokens]
KEEP_TOKENS = allcase_tokens + ['#' + s for s in allcase_tokens]
    # takes 15 minutes and 10GB of RAM for 500k tweets if you keep all 20M unique tokens/names/URLs
vocab_path = os.path.join(BIGDATA_PATH, 'vocab939370.pkl')
if os.path.isfile(vocab_path):
print('Loading vocab: {} ...'.format(vocab_path))
vocab = Dictionary.load(vocab_path)
print(' len(vocab) loaded: {}'.format(len(vocab.dfs)))
else:
tweets_path = os.path.join(BIGDATA_PATH, 'tweets.csv.gz')
print('Loading tweets: {} ...'.format(tweets_path))
tweets = read_csv(tweets_path)
tweets = np.array(tweets.text.str.split())
with gzip.open(os.path.join(BIGDATA_PATH, 'tweets.txt.gz'), 'w') as f:
for tokens in tweets:
f.write((' '.join(tokens) + '\n').encode('utf-8'))
# tweets['text'] = tweets.text.apply(lambda s: eval(s).decode('utf-8'))
# tweets['user'] = tweets.user.apply(lambda s: eval(s).decode('utf-8'))
# tweets.to_csv('tweets.csv.gz', compression='gzip')
print('Computing vocab from {} tweets...'.format(len(tweets)))
vocab = Dictionary(tweets, no_below=NO_BELOW, no_above=NO_ABOVE, keep_tokens=set(KEEP_TOKENS))
vocab.filter_extremes(no_below=NO_BELOW, no_above=NO_ABOVE, keep_n=KEEP_N, keep_tokens=set(KEEP_TOKENS))
print(' len(vocab) after filtering: {}'.format(len(vocab.dfs)))
    # takes no time at all; just a bookkeeping step, doesn't actually compute anything
tfidf = TfidfModel(id2word=vocab, dictionary=vocab)
tfidf.save(os.path.join(BIGDATA_PATH, 'tfidf{}.pkl'.format(len(vocab.dfs))))
tweets = [vocab.doc2bow(tw) for tw in tweets]
json.dump(tweets, gzip.open(os.path.join(BIGDATA_PATH, 'tweet_bows.json.gz'), 'w'))
gc.collect()
    # LSA is a more useful name than LSI
lsa = LsiModel(tfidf[tweets], num_topics=200, id2word=vocab, extra_samples=100, power_iters=2)
return lsa
if __name__ == '__main__':
lsa = lsa_twitter()
# these models can be big
lsa.save(os.path.join(BIGDATA_PATH, 'lsa_tweets'))
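    # Illustrative follow-up using the standard gensim API: the saved model can be reloaded
    # and inspected later, e.g.
    #     lsa = LsiModel.load(os.path.join(BIGDATA_PATH, 'lsa_tweets'))
    #     lsa.print_topics(5)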
|
mit
| -2,815,172,349,528,805,000
| 42.782051
| 118
| 0.656223
| false
| 3.133028
| false
| false
| false
|
ResolveWang/algrithm_qa
|
binarytree/q1.py
|
1
|
3054
|
"""
Problem: implement pre-order, in-order and post-order traversal of a binary tree, both recursively and non-recursively.
Approach: the non-recursive versions use an explicit auxiliary stack in place of the function call stack.
"""
from binarytree.toolcls import Node
class RecursiveVisit:
@classmethod
def visit_in_first_order(cls, head):
if head is None:
return
print(head.value, end=' ')
cls.visit_in_first_order(head.left)
cls.visit_in_first_order(head.right)
@classmethod
def visit_in_mid_order(cls, head):
if head is None:
return
cls.visit_in_mid_order(head.left)
print(head.value, end=' ')
cls.visit_in_mid_order(head.right)
@classmethod
def visit_in_last_order(cls, head):
if head is None:
return
cls.visit_in_last_order(head.left)
cls.visit_in_last_order(head.right)
print(head.value, end=' ')
class LoopVisit:
@classmethod
def visit_in_first_order(cls, head):
if head is None:
return
stack = list()
stack.append(head)
while len(stack) > 0:
node = stack.pop()
print(node.value, end=' ')
if node.right is not None:
stack.append(node.right)
if node.left is not None:
stack.append(node.left)
@classmethod
def visit_in_mid_order(cls, head):
if head is None:
return
stack = list()
cur = head
while len(stack) > 0 or cur is not None:
if cur is not None:
stack.append(cur)
cur = cur.left
else:
cur = stack.pop()
print(cur.value, end=' ')
cur = cur.right
@classmethod
def visit_in_last_order(cls, head):
if head is None:
return
stack1 = list()
stack2 = list()
cur = head
stack1.append(cur)
while len(stack1) > 0:
cur = stack1.pop()
if cur.left is not None:
stack1.append(cur.left)
if cur.right is not None:
stack1.append(cur.right)
stack2.append(cur.value)
while len(stack2) > 0:
print(stack2.pop(), end=' ')
if __name__ == '__main__':
head = Node(5)
head.left = Node(3)
head.right = Node(8)
head.left.left = Node(2)
head.left.right = Node(4)
head.left.left.left = Node(1)
head.right.left = Node(7)
head.right.left.left = Node(6)
head.right.right = Node(10)
head.right.right.left = Node(9)
head.right.right.right = Node(11)
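    # Expected output for the tree built above (the recursive and the loop-based visit in
    # each pair should print the same sequence):
    #   pre-order:  5 3 2 1 4 8 7 6 10 9 11
    #   in-order:   1 2 3 4 5 6 7 8 9 10 11
    #   post-order: 1 2 4 3 6 7 9 11 10 8 5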
RecursiveVisit.visit_in_first_order(head)
print()
LoopVisit.visit_in_first_order(head)
print()
print('===========================')
RecursiveVisit.visit_in_mid_order(head)
print()
LoopVisit.visit_in_mid_order(head)
print()
print('===========================')
RecursiveVisit.visit_in_last_order(head)
print()
LoopVisit.visit_in_last_order(head)
print()
|
mit
| -6,681,515,285,919,521,000
| 24.153846
| 48
| 0.532631
| false
| 3.309336
| false
| false
| false
|
Gjacquenot/AcousticBEM
|
Python/HelmholtzIntegralsRAD.py
|
1
|
15968
|
# ---------------------------------------------------------------------------
# Copyright (C) 2017 Frank Jargstorff
#
# This file is part of the AcousticBEM library.
# AcousticBEM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AcousticBEM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AcousticBEM. If not, see <http://www.gnu.org/licenses/>.
# ---------------------------------------------------------------------------
import numpy as np
from numpy.linalg import norm
from HelmholtzIntegrals2D import ComplexQuad
from Geometry import Normal2D
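# Overview (following Kirkup's usual notation, as suggested by the function names below):
# ComputeL, ComputeM, ComputeMt and ComputeN evaluate the single-layer (L), double-layer (M),
# transposed double-layer (Mt) and hypersingular (N) Helmholtz boundary integral operators
# for one axisymmetric boundary element, where p is the collocation point and qa, qb are the
# end points of the element's generator in the (r, z) half-plane.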
class CircularIntegratorPi(object):
"""
    Integrator class for integrating over the upper half-circle, in other
    words integrating a function along the unit arc over angles
    theta in [0, pi].
"""
samples = np.array([[0.980144928249, 5.061426814519E-02],
[0.898333238707, 0.111190517227],
[0.762766204958, 0.156853322939],
[0.591717321248, 0.181341891689],
[0.408282678752, 0.181341891689],
[0.237233795042, 0.156853322939],
[0.101666761293, 0.111190517227],
[1.985507175123E-02, 5.061426814519E-02]], dtype=np.float32)
def __init__(self, segments):
self.segments = segments
nSamples = segments * self.samples.shape[0]
self.rotationFactors = np.empty((nSamples, 2), dtype=np.float32)
factor = np.pi / self.segments
for i in range(nSamples):
arcAbscissa = i / self.samples.shape[0] + self.samples[i % self.samples.shape[0], 0]
arcAbscissa *= factor
self.rotationFactors[i, :] = np.cos(arcAbscissa), np.sin(arcAbscissa)
def integrate(self, func):
sum = 0.0
for n in range(self.rotationFactors.shape[0]):
sum += self.samples[n % self.samples.shape[0], 1] * func(self.rotationFactors[n, :])
return sum * np.pi / self.segments
def ComplexQuadGenerator(func, start, end):
"""
    This is a variation on the basic complex quadrature function ComplexQuad.
    The difference is that the abscissa values y**2 have been substituted
    for x. Kirkup doesn't explain in detail why this is helpful for this
    kind of 2D integral evaluation, but points to his PhD thesis and another
    reference that I have no access to.
"""
samples = np.array([[0.980144928249, 5.061426814519E-02],
[0.898333238707, 0.111190517227],
[0.762766204958, 0.156853322939],
[0.591717321248, 0.181341891689],
[0.408282678752, 0.181341891689],
[0.237233795042, 0.156853322939],
[0.101666761293, 0.111190517227],
[1.985507175123E-02, 5.061426814519E-02]], dtype=np.float32)
vec = end - start
sum = 0.0
for n in range(samples.shape[0]):
x = start + samples[n, 0]**2 * vec
sum += samples[n, 1] * func(x) * samples[n, 0]
return 2.0 * sum * norm(vec)
def ComplexQuadCone(func, start, end, segments = 1):
delta = 1.0 / segments * (end - start)
sum = 0.0
for s in range(segments):
sum += ComplexQuad(func, start + s * delta, start + (s+1) * delta)
return sum
def ComputeL(k, p, qa, qb, pOnElement):
qab = qb - qa
    # subdivide the circular integral into sections of
# similar size as qab
q = 0.5 * (qa + qb)
nSections = 1 + int(q[0] * np.pi / norm(qab))
if pOnElement:
ap = p - qa
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(2 * nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
return 1.0 / norm(rr)
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuadGenerator(generatorFunc, p, qa) + ComplexQuadGenerator(generatorFunc, p, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(2 * nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
return (np.exp(1.0j * k * RR) - 1.0) / RR
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComputeL(0.0, p, qa, qb, True) + ComplexQuad(generatorFunc, qa, qb)
else:
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
return 1.0 / norm(rr)
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuad(generatorFunc, qa, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
return np.exp(1.0j * k * RR) / RR
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuad(generatorFunc, qa, qb)
return 0.0
def ComputeM(k, p, qa, qb, pOnElement):
qab = qb - qa
vec_q = Normal2D(qa, qb)
    # subdivide the circular integral into sections of
# similar size as qab
q = 0.5 * (qa + qb)
nSections = 1 + int(q[0] * np.pi / norm(qab))
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
return -np.dot(rr, vec_q3) / (norm(rr) * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
if pOnElement:
return ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
return ComplexQuad(generatorFunc, qa, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
return (1j * k * RR - 1.0) * np.exp(1j * k * RR) * np.dot(rr, vec_q3) / (RR * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
if pOnElement:
return ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
return ComplexQuad(generatorFunc, qa, qb)
return 0.0
def ComputeMt(k, p, vecp, qa, qb, pOnElement):
qab = qb - qa
    # subdivide the circular integral into sections of
# similar size as qab
q = 0.5 * (qa + qb)
nSections = 1 + int(q[0] * np.pi / norm(qab))
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
                dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
return dotRnP / (norm(rr) * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
if pOnElement:
return ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
return ComplexQuad(generatorFunc, qa, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
return -(1j * k * RR - 1.0) * np.exp(1j * k * RR) * dotRnP / (RR * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
if pOnElement:
return ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
return ComplexQuad(generatorFunc, qa, qb)
def ComputeN(k, p, vecp, qa, qb, pOnElement):
qab = qb - qa
vec_q = Normal2D(qa, qb)
    # subdivide the circular integral into sections of
# similar size as qab
q = 0.5 * (qa + qb)
nSections = 1 + int(q[0] * np.pi / norm(qab))
if pOnElement:
if k == 0.0:
vecp3 = np.array([vecp[0], 0.0, vecp[1]], dtype=np.float32)
def coneFunc(x, direction):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.sqrt(0.5) * np.array([x[0], x[1], direction], dtype=np.float32)
dnpnq = np.dot(vecp3, vec_q3)
rr = q3 - p3
RR = norm(rr)
dotRNP = np.dot(rr, vecp3)
dotRNQ = -np.dot(rr, vec_q3)
RNPRNQ = dotRNP * dotRNQ / np.dot(rr, rr)
return (dnpnq + 3.0 * RNPRNQ) / (RR * np.dot(rr, rr))
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
lenAB = norm(qab)
# deal with the cone at the qa side of the generator
direction = np.sign(qa[1] - qb[1])
if direction == 0.0:
direction = 1.0
tip_a = np.array([0.0, qa[1] + direction * qa[0]], dtype=np.float32)
nConeSectionsA = int(qa[0] * np.sqrt(2.0) / lenAB) + 1
coneValA = ComplexQuadCone(lambda x: coneFunc(x, direction), qa, tip_a, nConeSectionsA)
# deal with the cone at the qb side of the generator
direction = np.sign(qb[1] - qa[1])
if direction == 0.0:
direction = -1.0
tip_b = np.array([0.0, qb[1] + direction * qb[0]], dtype=np.float32)
nConeSectionsB = int(qb[0] * np.sqrt(2.0) / lenAB) + 1
coneValB = ComplexQuadCone(lambda x: coneFunc(x, direction), qb, tip_b, nConeSectionsB)
return -(coneValA + coneValB)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
DNPNQ = vecp[0] * vec_q3[0] + vecp[1] * vec_q3[2]
dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
dotRnQ = -np.dot(rr, vec_q3)
RNPRNQ = dotRnP * dotRnQ / np.dot(rr, rr)
RNPNQ = -(DNPNQ + RNPRNQ) / RR
IKR = 1j * k * RR
FPG0 = 1.0 / RR
FPGR = np.exp(IKR) / np.dot(rr, rr) * (IKR - 1.0)
FPGR0 = -1.0 / np.dot(rr, rr)
FPGRR = np.exp(IKR) * (2.0 - 2.0 * IKR - (k*RR)**2) / (RR * np.dot(rr, rr))
FPGRR0 = 2.0 / (RR * np.dot(rr, rr))
return (FPGR - FPGR0) * RNPNQ + (FPGRR - FPGRR0) * RNPRNQ \
+ k**2 * FPG0 / 2.0
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComputeN(0.0, p, vecp, qa, qb, True) - k**2 * ComputeL(0.0, p, qa, qb, True) / 2.0 \
+ ComplexQuad(generatorFunc, qa, p) + ComplexQuad(generatorFunc, p, qb)
else:
if k == 0.0:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
DNPNQ = vecp[0] * vec_q3[0] + vecp[1] * vec_q3[2]
dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
dotRnQ = -np.dot(rr, vec_q3)
RNPRNQ = dotRnP * dotRnQ / np.dot(rr, rr)
RNPNQ = -(DNPNQ + RNPRNQ) / RR
IKR = 1j * k * RR
FPGR = -1.0 / np.dot(rr, rr)
FPGRR = 2.0 / (RR * np.dot(rr, rr))
return FPGR * RNPNQ + FPGRR * RNPRNQ
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuad(generatorFunc, qa, qb)
else:
def generatorFunc(x):
circle = CircularIntegratorPi(nSections)
r = x[0]
z = x[1]
p3 = np.array([p[0], 0.0, p[1]], dtype=np.float32)
def circleFunc(x):
q3 = np.array([r * x[0], r * x[1], z], dtype=np.float32)
vec_q3 = np.array([vec_q[0] * x[0], vec_q[0] * x[1], vec_q[1]], dtype=np.float32)
rr = q3 - p3
RR = norm(rr)
DNPNQ = vecp[0] * vec_q3[0] + vecp[1] * vec_q3[2]
dotRnP = vecp[0] * rr[0] + vecp[1] * rr[2]
dotRnQ = -np.dot(rr, vec_q3)
RNPRNQ = dotRnP * dotRnQ / np.dot(rr, rr)
RNPNQ = -(DNPNQ + RNPRNQ) / RR
IKR = 1j * k * RR
FPGR = np.exp(IKR) / np.dot(rr, rr) * (IKR - 1.0)
FPGRR = np.exp(IKR) * (2.0 - 2.0 * IKR - (k*RR)**2) / (RR * np.dot(rr, rr))
return FPGR * RNPNQ + FPGRR * RNPRNQ
return circle.integrate(circleFunc) * r / (2.0 * np.pi)
return ComplexQuad(generatorFunc, qa, qb)
|
gpl-3.0
| -5,642,675,042,839,800,000
| 38.92
| 110
| 0.48353
| false
| 3.118141
| false
| false
| false
|
bastibe/PySoundCard
|
pysoundcard.py
|
1
|
26373
|
import sys
import os
from cffi import FFI
import atexit
import numpy as np
import warnings
"""PySoundCard is an audio library based on PortAudio, CFFI and NumPy
PySoundCard can play and record audio data. Audio devices are supported
through PortAudio[1], which is a free, cross-platform, open-source
audio I/O library that runs on many platforms including Windows, OS X,
and Unix (OSS/ALSA). It is accessed through CFFI[2], which is a
foreign function interface for Python calling C code. CFFI is
supported for CPython 2.6+, 3.x and PyPy 2.0+. PySoundCard represents
audio data as NumPy arrays.
PySoundCard is inspired by PyAudio[3]. Its main difference is that it
uses CFFI instead of a CPython extension and tries to implement a more
pythonic interface. Its performance characteristics are very similar.
[1]: http://www.portaudio.com/
[2]: http://cffi.readthedocs.org/
[3]: http://people.csail.mit.edu/hubert/pyaudio/
The basic building block of audio input/output in PySoundCard are
streams. Streams represent sound cards, both for audio playback and
recording. Every stream has a sample rate, a block size, an input
device and/or an output device.
There are two modes of operation for streams: read/write and callback
mode.
In read/write mode, two methods are used to play/record audio: For
playback, you write to a stream. For recording, you read from a
stream. You can read/write up to one block of audio data to a stream
without having to wait for it to play.
In callback mode, a callback function is defined, which will be called
asynchronously whenever there is a new block of audio data available
to read or write. The callback function must then provide/consume one
block of audio data.
A stream can be either full duplex (both input and output) or half
duplex (either input or output). This is determined by specifying one
or two devices for the stream. Both devices must be part of the same
audio API.
Use the function hostapi_info() to get information about each available
host API and device_info() to get information about each device. There are
additional functions to get the default devices and api. If a stream
is created without specifying a device, the default devices are used.
Both devices and apis are simple dictionaries that contain information
and configuration options. Many device options can be changed simply
by modifying the dictionary before passing it to the stream
constructor. This includes the number of channels, the desired
latency, and the audio data format.
PySoundCard is BSD licensed.
(c) 2013, Bastian Bechtold
"""
__version__ = "0.5.2"
ffi = FFI()
ffi.cdef("""
typedef int PaError;
typedef enum PaErrorCode
{
paNoError = 0,
paNotInitialized = -10000,
paUnanticipatedHostError,
paInvalidChannelCount,
paInvalidSampleRate,
paInvalidDevice,
paInvalidFlag,
paSampleFormatNotSupported,
paBadIODeviceCombination,
paInsufficientMemory,
paBufferTooBig,
paBufferTooSmall,
paNullCallback,
paBadStreamPtr,
paTimedOut,
paInternalError,
paDeviceUnavailable,
paIncompatibleHostApiSpecificStreamInfo,
paStreamIsStopped,
paStreamIsNotStopped,
paInputOverflowed,
paOutputUnderflowed,
paHostApiNotFound,
paInvalidHostApi,
paCanNotReadFromACallbackStream,
paCanNotWriteToACallbackStream,
paCanNotReadFromAnOutputOnlyStream,
paCanNotWriteToAnInputOnlyStream,
paIncompatibleStreamHostApi,
paBadBufferPtr
} PaErrorCode;
PaError Pa_Initialize(void);
PaError Pa_Terminate(void);
int Pa_GetVersion(void);
const char *Pa_GetVersionText(void);
typedef int PaDeviceIndex;
typedef enum PaHostApiTypeId
{
paInDevelopment=0, /* use while developing support for a new host API */
paDirectSound=1,
paMME=2,
paASIO=3,
paSoundManager=4,
paCoreAudio=5,
paOSS=7,
paALSA=8,
paAL=9,
paBeOS=10,
paWDMKS=11,
paJACK=12,
paWASAPI=13,
paAudioScienceHPI=14
} PaHostApiTypeId;
typedef struct PaHostApiInfo {
int structVersion;
enum PaHostApiTypeId type;
const char *name;
int deviceCount;
PaDeviceIndex defaultInputDevice;
PaDeviceIndex defaultOutputDevice;
} PaHostApiInfo;
typedef int PaHostApiIndex;
PaHostApiIndex Pa_GetHostApiCount();
const PaHostApiInfo *Pa_GetHostApiInfo(PaHostApiIndex);
typedef double PaTime;
typedef struct PaDeviceInfo {
int structVersion;
const char *name;
PaHostApiIndex hostApi;
int maxInputChannels;
int maxOutputChannels;
PaTime defaultLowInputLatency;
PaTime defaultLowOutputLatency;
PaTime defaultHighInputLatency;
PaTime defaultHighOutputLatency;
double defaultSampleRate;
} PaDeviceInfo;
PaDeviceIndex Pa_GetDeviceCount(void);
const PaDeviceInfo *Pa_GetDeviceInfo(PaDeviceIndex);
PaHostApiIndex Pa_GetDefaultHostApi(void);
PaDeviceIndex Pa_GetDefaultInputDevice(void);
PaDeviceIndex Pa_GetDefaultOutputDevice(void);
const char *Pa_GetErrorText(PaError);
typedef void PaStream;
typedef unsigned long PaSampleFormat;
typedef struct PaStreamParameters {
PaDeviceIndex device;
int channelCount;
PaSampleFormat sampleFormat;
PaTime suggestedLatency;
void *hostApiSpecificStreamInfo;
} PaStreamParameters;
typedef unsigned long PaStreamFlags;
typedef struct PaStreamCallbackTimeInfo{
PaTime inputBufferAdcTime;
PaTime currentTime;
PaTime outputBufferDacTime;
} PaStreamCallbackTimeInfo;
typedef unsigned long PaStreamCallbackFlags;
typedef int PaStreamCallback(const void*, void*, unsigned long,
const PaStreamCallbackTimeInfo*,
PaStreamCallbackFlags, void*);
typedef void PaStreamFinishedCallback(void*);
typedef struct PaStreamInfo {
int structVersion;
PaTime inputLatency;
PaTime outputLatency;
double sampleRate;
} PaStreamInfo;
PaError Pa_OpenStream(PaStream**, const PaStreamParameters*,
const PaStreamParameters*, double,
unsigned long, PaStreamFlags,
PaStreamCallback*, void*);
PaError Pa_CloseStream (PaStream*);
PaError Pa_SetStreamFinishedCallback(PaStream*, PaStreamFinishedCallback*);
PaError Pa_StartStream (PaStream*);
PaError Pa_StopStream (PaStream*);
PaError Pa_AbortStream (PaStream*);
PaError Pa_IsStreamStopped (PaStream*);
PaError Pa_IsStreamActive (PaStream*);
const PaStreamInfo *Pa_GetStreamInfo (PaStream*);
PaTime Pa_GetStreamTime (PaStream*);
double Pa_GetStreamCpuLoad (PaStream*);
PaError Pa_ReadStream (PaStream*, void*, unsigned long);
PaError Pa_WriteStream (PaStream*, const void*, unsigned long);
signed long Pa_GetStreamReadAvailable (PaStream*);
signed long Pa_GetStreamWriteAvailable (PaStream*);
PaError Pa_GetSampleSize (PaSampleFormat);
void Pa_Sleep (long);
""")
continue_flag = 0
complete_flag = 1
abort_flag = 2
_np2pa = {
np.dtype('float32'): 0x01,
np.dtype('int32'): 0x02,
np.dtype('int16'): 0x08,
np.dtype('int8'): 0x10,
np.dtype('uint8'): 0x20
}
try:
_pa = ffi.dlopen('portaudio')
except OSError as err:
if sys.platform == 'darwin':
libname = 'portaudio.dylib'
elif sys.platform == 'win32':
from platform import architecture as _architecture
libname = 'portaudio' + _architecture()[0] + '.dll'
else:
raise
_pa = ffi.dlopen(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'_soundcard_data', libname))
_pa.Pa_Initialize()
atexit.register(_pa.Pa_Terminate)
def hostapi_info(index=None):
"""Return a generator with information about each host API.
If index is given, only one dictionary for the given host API is
returned.
"""
if index is None:
return (hostapi_info(i) for i in range(_pa.Pa_GetHostApiCount()))
else:
info = _pa.Pa_GetHostApiInfo(index)
if not info:
raise RuntimeError("Invalid host API")
assert info.structVersion == 1
return {'name': ffi.string(info.name).decode(errors='ignore'),
'default_input_device': info.defaultInputDevice,
'default_output_device': info.defaultOutputDevice}
def device_info(index=None):
"""Return a generator with information about each device.
If index is given, only one dictionary for the given device is
returned.
"""
if index is None:
return (device_info(i) for i in range(_pa.Pa_GetDeviceCount()))
else:
info = _pa.Pa_GetDeviceInfo(index)
if not info:
raise RuntimeError("Invalid device")
assert info.structVersion == 2
if 'DirectSound' in hostapi_info(info.hostApi)['name']:
enc = 'mbcs'
else:
enc = 'utf-8'
return {'name': ffi.string(info.name).decode(encoding=enc,
errors='ignore'),
'hostapi': info.hostApi,
'max_input_channels': info.maxInputChannels,
'max_output_channels': info.maxOutputChannels,
'default_low_input_latency': info.defaultLowInputLatency,
'default_low_output_latency': info.defaultLowOutputLatency,
'default_high_input_latency': info.defaultHighInputLatency,
'default_high_output_latency': info.defaultHighOutputLatency,
'default_samplerate': info.defaultSampleRate}
def default_hostapi():
"""Return default host API index."""
return _pa.Pa_GetDefaultHostApi()
def default_input_device():
"""Return default input device index."""
idx = _pa.Pa_GetDefaultInputDevice()
if idx < 0:
raise RuntimeError("No default input device available")
return idx
def default_output_device():
"""Return default output device index."""
idx = _pa.Pa_GetDefaultOutputDevice()
if idx < 0:
raise RuntimeError("No default output device available")
return idx
def pa_version():
"""Returns the version information about the portaudio library."""
return (_pa.Pa_GetVersion(), ffi.string(_pa.Pa_GetVersionText()).decode())
class _StreamBase(object):
"""Base class for Stream, InputStream and OutputStream."""
def __init__(self, iparameters, oparameters, samplerate, blocksize,
callback_wrapper, finished_callback,
clip_off=False, dither_off=False, never_drop_input=False,
prime_output_buffers_using_stream_callback=False):
stream_flags = 0x0
if clip_off:
stream_flags |= 0x00000001
if dither_off:
stream_flags |= 0x00000002
if never_drop_input:
stream_flags |= 0x00000004
if prime_output_buffers_using_stream_callback:
stream_flags |= 0x00000008
if callback_wrapper:
self._callback = ffi.callback(
"PaStreamCallback", callback_wrapper, error=abort_flag)
else:
self._callback = ffi.NULL
self._stream = ffi.new("PaStream**")
err = _pa.Pa_OpenStream(self._stream, iparameters, oparameters,
samplerate, blocksize, stream_flags,
self._callback, ffi.NULL)
self._handle_error(err)
# dereference PaStream** --> PaStream*
self._stream = self._stream[0]
# set some stream information
self.blocksize = blocksize
info = _pa.Pa_GetStreamInfo(self._stream)
if not info:
raise RuntimeError("Could not obtain stream info!")
self.samplerate = info.sampleRate
if not oparameters:
self.latency = info.inputLatency
elif not iparameters:
self.latency = info.outputLatency
else:
self.latency = info.inputLatency, info.outputLatency
if finished_callback:
def finished_callback_wrapper(_):
return finished_callback()
self._finished_callback = ffi.callback(
"PaStreamFinishedCallback", finished_callback_wrapper)
err = _pa.Pa_SetStreamFinishedCallback(self._stream,
self._finished_callback)
self._handle_error(err)
# Avoid confusion if something goes wrong before assigning self._stream:
_stream = ffi.NULL
def _handle_error(self, err):
# all error codes are negative:
if err >= 0:
return err
errstr = ffi.string(_pa.Pa_GetErrorText(err)).decode()
if err == -9981 or err == -9980:
# InputOverflowed and OuputUnderflowed are non-fatal:
warnings.warn("%.4f: %s" % (self.time(), errstr),
RuntimeWarning, stacklevel=2)
return err
else:
raise RuntimeError("%.4f: %s" % (self.time(), errstr))
def __del__(self):
# Close stream at garbage collection
self.close()
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, tb):
self.stop()
self.close()
def start(self):
"""Commence audio processing.
If successful, the stream is considered active.
"""
err = _pa.Pa_StartStream(self._stream)
if err == _pa.paStreamIsNotStopped:
return
self._handle_error(err)
def stop(self):
"""Terminate audio processing.
This waits until all pending audio buffers have been played
before it returns. If successful, the stream is considered
inactive.
"""
err = _pa.Pa_StopStream(self._stream)
if err == _pa.paStreamIsStopped:
return
self._handle_error(err)
def abort(self):
"""Terminate audio processing immediately.
This does not wait for pending audio buffers. If successful,
the stream is considered inactive.
"""
err = _pa.Pa_AbortStream(self._stream)
if err == _pa.paStreamIsStopped:
return
self._handle_error(err)
def close(self):
"""Close the stream.
Can be called multiple times.
If the audio stream is active any pending buffers are discarded
as if abort() had been called.
"""
_pa.Pa_CloseStream(self._stream)
# There might be errors if _pa.Pa_Terminate() has been called
# already or if the stream has been closed before.
# Those errors are ignored here, it's too late anyway ...
def is_active(self):
"""Determine whether the stream is active.
A stream is active after a successful call to start(). It
becomes inactive as a result to stop() or abort() or a return
value other than continue from the stream callback.
"""
return self._handle_error(_pa.Pa_IsStreamActive(self._stream)) == 1
def is_stopped(self):
"""Determine whether a stream is stopped.
A stream is stopped before the first call to start() and after
a successful call to stop() or abort(). If the stream callback
returns a value other than continue, the stream is NOT
considered stopped.
"""
return self._handle_error(_pa.Pa_IsStreamStopped(self._stream)) == 1
def time(self):
"""Returns the current stream time in seconds.
This is the same time that is given to the stream callback. It
is monotonically increasing and is not affected by starting or
stopping the stream. This time may be used for synchronizing
other events to the audio stream.
"""
return _pa.Pa_GetStreamTime(self._stream)
def cpu_load(self):
"""Retrieve CPU usage information for the specified stream.
A floating point number between 0.0 and 1.0 that is a fraction
of the total CPU time consumed by the stream callback audio
processing within portaudio. This excludes time spent in the
cffi and Python. This function does not work with blocking
read/write streams.
"""
return _pa.Pa_GetStreamCpuLoad(self._stream)
class InputStream(_StreamBase):
"""Stream for recording only. See :class:`Stream`."""
def __init__(self, samplerate=None, blocksize=0,
device=None, channels=None, dtype='float32', latency=0,
callback=None, finished_callback=None, **flags):
parameters, self.dtype, samplerate = _get_stream_parameters(
'input', device, channels, dtype, latency, samplerate)
self.device = parameters.device
self.channels = parameters.channelCount
def callback_wrapper(iptr, optr, frames, time, status, _):
data = _frombuffer(iptr, frames, self.channels, self.dtype)
return callback(data, _time2dict(time), status)
_StreamBase.__init__(self, parameters, ffi.NULL, samplerate,
blocksize, callback and callback_wrapper,
finished_callback, **flags)
def read_length(self):
"""The number of frames that can be read without waiting."""
return _pa.Pa_GetStreamReadAvailable(self._stream)
def read(self, frames, raw=False):
"""Read samples from an input stream.
The function does not return until the required number of
frames has been read. This may involve waiting for the
operating system to supply the data.
If raw data is requested, the raw cffi data buffer is
returned. Otherwise, a numpy array of the appropriate dtype
with one column per channel is returned.
"""
channels, _ = _split(self.channels)
dtype, _ = _split(self.dtype)
data = ffi.new("signed char[]", channels * dtype.itemsize * frames)
self._handle_error(_pa.Pa_ReadStream(self._stream, data, frames))
if not raw:
data = np.frombuffer(ffi.buffer(data), dtype=dtype)
data.shape = frames, channels
return data
class OutputStream(_StreamBase):
"""Stream for playback only. See :class:`Stream`."""
def __init__(self, samplerate=None, blocksize=0,
device=None, channels=None, dtype='float32', latency=0,
callback=None, finished_callback=None, **flags):
parameters, self.dtype, samplerate = _get_stream_parameters(
'output', device, channels, dtype, latency, samplerate)
self.device = parameters.device
self.channels = parameters.channelCount
def callback_wrapper(iptr, optr, frames, time, status, _):
data = _frombuffer(optr, frames, self.channels, self.dtype)
return callback(data, _time2dict(time), status)
_StreamBase.__init__(self, ffi.NULL, parameters, samplerate,
blocksize, callback and callback_wrapper,
finished_callback, **flags)
def write_length(self):
"""The number of frames that can be written without waiting."""
return _pa.Pa_GetStreamWriteAvailable(self._stream)
def write(self, data):
"""Write samples to an output stream.
As much as one blocksize of audio data will be played
without blocking. If more than one blocksize was provided,
the function will only return when all but one blocksize
has been played.
Data will be converted to a numpy matrix. Multichannel data
should be provided as a (frames, channels) matrix. If the
data is provided as a 1-dim array, it will be treated as mono
data and will be played on all channels simultaneously. If the
data is provided as a 2-dim matrix and fewer tracks are
provided than channels, silence will be played on the missing
channels. Similarly, if more tracks are provided than there
are channels, the extraneous channels will not be played.
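        As an illustrative sketch, assuming s is a two-channel OutputStream,
        both of the following calls are accepted:

            s.write(np.zeros(1024))         # mono data, played on both channels
            s.write(np.zeros((1024, 2)))    # explicit two-channel frames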
"""
frames = len(data)
_, channels = _split(self.channels)
_, dtype = _split(self.dtype)
if (not isinstance(data, np.ndarray) or data.dtype != dtype):
data = np.array(data, dtype=dtype)
if len(data.shape) == 1:
# play mono signals on all channels
data = np.tile(data, (channels, 1)).T
if data.shape[1] > channels:
data = data[:, :channels]
if data.shape < (frames, channels):
# if less data is available than requested, pad with zeros.
tmp = data
data = np.zeros((frames, channels), dtype=dtype)
data[:tmp.shape[0], :tmp.shape[1]] = tmp
data = data.ravel().tostring()
err = _pa.Pa_WriteStream(self._stream, data, frames)
self._handle_error(err)
class Stream(InputStream, OutputStream):
"""Streams handle audio input and output to your application.
Each stream operates at a specific sample rate with specific
sample formats and buffer sizes. Each stream can either be half
duplex (input only or output only) or full duplex (both input and
output). For full duplex operation, the input and output device
must use the same audio api.
Once a stream has been created, audio processing can be started
and stopped multiple times using start(), stop() and abort(). The
functions is_active() and is_stopped() can be used to check this.
The functions info(), time() and cpu_load() can be used to get
additional information about the stream.
Data can be read and written to the stream using read() and
write(). Use read_length() and write_length() to see how many
frames can be read or written at the current time.
Alternatively, a callback can be specified which is called
whenever there is data available to read or write.
"""
def __init__(self, samplerate=None, blocksize=0,
device=None, channels=None, dtype='float32', latency=0,
callback=None, finished_callback=None, **flags):
"""Open a new stream.
If no input or output device is specified, the
default input/output device is taken.
If a callback is given, it will be called whenever the stream
is active and data is available to read or write. If a
finished_callback is given, it will be called whenever the
stream is stopped or aborted. If a callback is given, read()
and write() should not be used.
The callback should have a signature like this:
callback(input, output, time, status) -> flag
where input is the recorded data as a NumPy array, output is
another NumPy array (with uninitialized content), where the data
for playback has to be written to (using indexing).
time is a dictionary with some timing information, and
status indicates whether input or output buffers have
been inserted or dropped to overcome underflow or overflow
conditions.
The function must return one of continue_flag, complete_flag or
abort_flag. complete_flag and abort_flag act as if stop() or
abort() had been called, respectively. continue_flag resumes
normal audio processing.
The finished_callback should be a function with no arguments
and no return values.
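        As an illustrative sketch (assuming the input and output devices have
        the same number of channels), a simple pass-through callback might
        look like:

            def loopback(input, output, time, status):
                output[:] = input      # copy each recorded block to playback
                return continue_flag

            stream = Stream(samplerate=44100, blocksize=256, callback=loopback)
            stream.start()
            # ... keep the program running while audio is looped through ...
            stream.stop()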
"""
idevice, odevice = _split(device)
ichannels, ochannels = _split(channels)
idtype, odtype = _split(dtype)
ilatency, olatency = _split(latency)
iparameters, idtype, isamplerate = _get_stream_parameters(
'input', idevice, ichannels, idtype, ilatency, samplerate)
oparameters, odtype, osamplerate = _get_stream_parameters(
'output', odevice, ochannels, odtype, olatency, samplerate)
self.dtype = idtype, odtype
self.device = iparameters.device, oparameters.device
ichannels = iparameters.channelCount
ochannels = oparameters.channelCount
self.channels = ichannels, ochannels
if isamplerate != osamplerate:
raise RuntimeError(
"Input and output device must have the same samplerate")
else:
samplerate = isamplerate
def callback_wrapper(iptr, optr, frames, time, status, _):
idata = _frombuffer(iptr, frames, ichannels, idtype)
odata = _frombuffer(optr, frames, ochannels, odtype)
return callback(idata, odata, _time2dict(time), status)
_StreamBase.__init__(self, iparameters, oparameters, samplerate,
blocksize, callback and callback_wrapper,
finished_callback, **flags)
def _get_stream_parameters(kind, device, channels, dtype, latency, samplerate):
"""Generate PaStreamParameters struct."""
if device is None:
if kind == 'input':
device = _pa.Pa_GetDefaultInputDevice()
elif kind == 'output':
device = _pa.Pa_GetDefaultOutputDevice()
info = device_info(device)
if channels is None:
channels = info['max_' + kind + '_channels']
dtype = np.dtype(dtype)
try:
sample_format = _np2pa[dtype]
except KeyError:
raise ValueError("Invalid " + kind + " sample format")
if samplerate is None:
samplerate = info['default_samplerate']
parameters = ffi.new(
"PaStreamParameters*",
(device, channels, sample_format, latency, ffi.NULL))
return parameters, dtype, samplerate
def _frombuffer(ptr, frames, channels, dtype):
"""Create NumPy array from a pointer to some memory."""
framesize = channels * dtype.itemsize
data = np.frombuffer(ffi.buffer(ptr, frames * framesize), dtype=dtype)
data.shape = -1, channels
return data
def _time2dict(time):
"""Convert PaStreamCallbackTimeInfo struct to dict."""
return {'input_adc_time': time.inputBufferAdcTime,
'current_time': time.currentTime,
'output_dac_time': time.outputBufferDacTime}
def _split(value):
"""Split input/output value into two values."""
if isinstance(value, str):
# iterable, but not meant for splitting
return value, value
try:
invalue, outvalue = value
except TypeError:
invalue = outvalue = value
except ValueError:
raise ValueError("Only single values and pairs are allowed")
return invalue, outvalue
|
bsd-3-clause
| -7,574,350,281,367,325,000
| 33.701316
| 79
| 0.657339
| false
| 4.132404
| false
| false
| false
|
plaufer/wikiwsd
|
wsd/database/mysqlbuildview.py
|
1
|
7099
|
import MySQLdb
import logging
import time
MYSQL_DEAD_LOCK_ERROR = 1213
class MySQLBuildView:
"""The MySQLBuildView class allows database access optimized to
build the disambiguation database
"""
def __init__(self, db_connection):
"""constructor
@param db_connector the database connector used to access the database
"""
self._db_connection = db_connection
self._cursor = db_connection.cursor()
self.reset_cache()
def __del__(self):
"""destructor
closes the database connection
"""
self._db_connection.close()
def insert_article(self, id, title):
"""saves an article in the database
@param id the id of the article
@param title the title of the article
"""
try:
self._cursor.execute('INSERT INTO articles(id, title) VALUES(%s, %s);',
(id, title))
except MySQLdb.Error, e:
logging.error('error saving article "%s" to database: %s (%d)'
% (title.encode('ascii', 'ignore'), e.args[1], e.args[0]))
def insert_redirect(self, source_name, target_name):
"""saves a redirect in the database
@param source_name the name of the source article
@param target_name the name of the target article
"""
try:
self._cursor.execute('INSERT INTO redirects(source_article_name, target_article_name) VALUES(%s, %s);',
(source_name, target_name))
except MySQLdb.Error, e:
logging.error('error saving redirect "%s" --> "%s" to database: %s (%d)'
% (source_name.encode('ascii', 'ignore'), target_name.encode('ascii', 'ignore'), e.args[1], e.args[0]))
def insert_link(self, source_article_id, target_article_name):
"""saves a link to the database and updates the article record it points to
@param source_article_id the id of the article which links to the target
@param target_article_name the name of the target article
@return the id of the referenced article or None if not found
"""
target_article_id = self._resolve_title(target_article_name)
if target_article_id == None:
logging.error('Could not resolve target article "%s" for link from source article %d'
% (target_article_name.encode('ascii', 'ignore'), source_article_id))
else:
try:
self._cursor.execute('INSERT INTO links(source_article_id, target_article_id) VALUES(%s, %s);',
(source_article_id, target_article_id))
except MySQLdb.Error, e:
logging.error('error saving link (%d) --> (%d) to database: %s (%d)'
% (source_article_id, target_article_id, e.args[1], e.args[0]))
return target_article_id
def insert_references(self, target_article_ids):
"""inserts references to update the linkincount field of the target article
@param target_article_ids array of the referenced articles
"""
retry = True
retryCount = 0
while retry and retryCount < 10:
try:
retryCount += 1
self._cursor.executemany('UPDATE articles SET articleincount=articleincount+1 WHERE id=%s;', target_article_ids)
retry = False
except MySQLdb.Error, e:
if e.args[0] == MYSQL_DEAD_LOCK_ERROR:
                    logging.warning('deadlock updating articleincount field. retrying... (%d)' % (retryCount))
time.sleep(0.05)
else:
logging.error('error updating articleincount field for ids: ("%s"): %s (%s)'
% (",".join([str(id) for id in target_article_ids]), str(e.args[1]), str(e.args[0])))
if retry:
            logging.error('error updating articleincount field: deadlock persisted after %d retries when updating ids: ("%s")'
                % (retryCount, ",".join([str(id) for id in target_article_ids])))
def insert_disambiguation(self, string, target_article_name):
"""saves a disambiguation to the database
@param string the disambiguation string used for the linked entity
@param target_article_name the name of the article the disambiguation stands for
"""
target_article_id = self._resolve_title(target_article_name)
if target_article_id == None:
logging.error('Could not resolve target article "%s" for link from source article'
% (target_article_name.encode('ascii', 'ignore')))
else:
try:
self._cursor.execute('INSERT INTO disambiguations(string, target_article_id, occurrences) VALUES(%s, %s, 1) ON DUPLICATE KEY UPDATE occurrences=occurrences+1;',
(string, target_article_id))
except MySQLdb.Error, e:
logging.error('error saving disambiguation "%s" --> %s (%d): %s (%d)'
% (string.encode('ascii', 'ignore'), target_article_name.encode('ascii', 'ignore'), target_article_id, e.args[1], e.args[0]))
def insert_ngrams(self, ngrams):
"""inserts ngrams into the database
@param ngrams a list of ngrams where each ngram is a tuple containing the string,
and a zero or one indicating whether it was used as a link
"""
try:
self._cursor.executemany('INSERT INTO ngrams(string, occurrences, as_link) VALUES(LOWER(%s), 1, %s) ON DUPLICATE KEY UPDATE occurrences=occurrences+1, as_link=as_link+VALUES(as_link);',
ngrams)
except MySQLdb.Error, e:
logging.error('error saving ngrams: %s (%d)' % (e.args[1], e.args[0]))
def commit(self):
'''commits the changes
'''
self._db_connection.commit()
def reset_cache(self):
"""resets the internal cache and thus prevents it from growing too big
"""
self._article_id_cache = {}
def _resolve_title(self, title):
"""resolves an article and returns its id
@param title the title of the article
"""
if title in self._article_id_cache:
return self._article_id_cache[title]
try:
self._cursor.execute('SELECT id FROM articles WHERE title=%s;', (title,))
row = self._cursor.fetchone()
if row == None:
self._cursor.execute('SELECT id FROM articles WHERE title=(SELECT target_article_name FROM redirects WHERE source_article_name=%s);',
(title,))
row = self._cursor.fetchone()
if row == None:
self._article_id_cache[title] = None
else:
self._article_id_cache[title] = row[0]
except MySQLdb.Error, e:
logging.error('error resolving article "%s": %s (%d)'
% (title.encode('ascii', 'ignore'), e.args[1], e.args[0]))
return self._article_id_cache[title]
|
mit
| -3,987,502,628,156,108,000
| 42.286585
| 197
| 0.579377
| false
| 4.195626
| false
| false
| false
|
bancek/egradebook
|
src/lib/compressor/filters/yui.py
|
1
|
1339
|
from subprocess import Popen, PIPE
from compressor.conf import settings
from compressor.filters import FilterBase, FilterError
from compressor.utils import cmd_split
class YUICompressorFilter(FilterBase):
def output(self, **kwargs):
arguments = ''
if self.type == 'js':
arguments = settings.YUI_JS_ARGUMENTS
if self.type == 'css':
arguments = settings.YUI_CSS_ARGUMENTS
command = '%s --type=%s %s' % (settings.YUI_BINARY, self.type, arguments)
if self.verbose:
command += ' --verbose'
try:
p = Popen(cmd_split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
filtered, err = p.communicate(self.content)
except IOError, e:
raise FilterError(e)
if p.wait() != 0:
if not err:
err = 'Unable to apply YUI Compressor filter'
raise FilterError(err)
if self.verbose:
print err
return filtered
class YUICSSFilter(YUICompressorFilter):
def __init__(self, *args, **kwargs):
super(YUICSSFilter, self).__init__(*args, **kwargs)
self.type = 'css'
class YUIJSFilter(YUICompressorFilter):
def __init__(self, *args, **kwargs):
super(YUIJSFilter, self).__init__(*args, **kwargs)
self.type = 'js'
|
gpl-3.0
| 2,404,042,129,496,164,400
| 26.895833
| 81
| 0.592233
| false
| 3.803977
| false
| false
| false
|
whereskenneth/Dwarfsquad
|
dwarfsquad/lib/build/from_export/build_compound_methods.py
|
1
|
6738
|
from dwarfsquad.lib.build.from_export.helpers import build_reference_map
from dwarfsquad.lib.utils import to_stderr
from dwarfsquad.model.Calibration import Calibration
from dwarfsquad.model.ChromatogramMethod import ChromatogramMethod
from dwarfsquad.model.CompoundMethod import CompoundMethod
from dwarfsquad.model.PeakIntegration import PeakIntegration
from dwarfsquad.model.ReductionMethod import ReductionMethod
from dwarfsquad.model.RetentionTime import RetentionTime
from dwarfsquad.model.Smoothing import Smoothing
from dwarfsquad.model.Threshold import Threshold
def build_compound_methods(compounds_csv):
compound_methods = []
unique_compound_choromatograms = set()
for row in compounds_csv:
try:
compound_method = get_compound_method(compound_methods, row)
chromatogram_method = get_chromatogram_method(row)
compound_method.chromatogram_methods.append(chromatogram_method)
compound_methods.insert(compound_method.view_order, compound_method)
unique_compound_chromatogram_name = compound_method.name + " - " + chromatogram_method.name
if unique_compound_chromatogram_name in unique_compound_choromatograms:
raise Exception("Assay already contains a compound/chromatogram combo of: " +
unique_compound_chromatogram_name)
else:
unique_compound_choromatograms.add(unique_compound_chromatogram_name)
except Exception as e:
for k, v in row.items():
to_stderr(k + ": " + v)
raise e
reference_map = build_reference_map(compound_methods)
return resolve_references(compound_methods, reference_map)
def resolve_references(compound_methods, reference_map):
resolved_cms = []
for cm in compound_methods:
cm.calibration.normalizers = [reference_map[n] for n in cm.calibration.normalizers if n]
cm.calibration.responses = [reference_map[r] for r in cm.calibration.responses if r]
resolved_ch_ms = []
for ch_m in cm.chromatogram_methods:
try:
reference = ch_m.peak_integration.retention_time.reference
ch_m.peak_integration.retention_time.reference = reference_map[reference]
except KeyError:
pass
resolved_ch_ms.append(ch_m)
cm.chromatogram_methods = resolved_ch_ms
resolved_cms.append(cm)
return resolved_cms
def get_chromatogram_method(row):
chromatogram_method = ChromatogramMethod({})
chromatogram_method.set_peak_integration(get_peak_integration(row))
chromatogram_method.set_reduction_method(get_reduction_method(row))
chromatogram_method.set_name(row.get('chromatogram_name'))
return chromatogram_method
def get_reduction_method(row):
reduction_method = ReductionMethod({})
reduction_method.set_activation_energy(row.get('activation_energy'))
reduction_method.set_combine_ions(row.get('combine_ions'))
reduction_method.set_lower_precursor_mass(row.get('lower_precursor_mass'))
reduction_method.set_upper_precursor_mass(row.get('upper_precursor_mass'))
reduction_method.set_lower_product_mass(row.get('lower_product_mass'))
reduction_method.set_upper_product_mass(row.get('upper_product_mass'))
reduction_method.set_polarity(row.get('polarity'))
return reduction_method
def get_peak_integration(row):
peak_integration = PeakIntegration({})
peak_integration.set_retention_time(get_retention_time(row))
peak_integration.set_threshold(get_threshold(row))
peak_integration.set_smoothing(get_smoothing(row))
peak_integration.set_prioritized_peak_models(get_prioritized_peak_models(row))
return peak_integration
def get_prioritized_peak_models(row):
return str(row.get('prioritized_peak_models')).split(';')
def get_smoothing(row):
smoothing = Smoothing({})
smoothing.set_fixed(row.get('fixed'))
smoothing.set_max(row.get('max'))
smoothing.set_min(row.get('min'))
smoothing.set_optimal_enabled(row.get('optimal_enabled'))
smoothing.set_start(row.get('start'))
return smoothing
def get_threshold(row):
threshold = Threshold({})
threshold.set_peak_probability(row.get('peak_probability'))
threshold.set_absolute_area(row.get('absolute_area'))
threshold.set_absolute_height(row.get('absolute_height'))
threshold.set_first_derivative(row.get('first_derivative'))
threshold.set_second_derivative(row.get('second_derivative'))
threshold.set_min_merge_difference(row.get('min_merge_difference'))
threshold.set_relative_area(row.get('relative_area'))
threshold.set_relative_height(row.get('relative_height'))
threshold.set_saturation(row.get('saturation'))
threshold.set_signal_to_noise(row.get('signal_to_noise'))
threshold.set_relative_low_std_area(row.get('relative_low_std_area'))
threshold.set_relative_low_std_height(row.get('relative_low_std_height'))
return threshold
def get_retention_time(row):
retention_time = RetentionTime({})
retention_time.set_bias(row.get('bias'))
retention_time.set_expected(row.get('expected'))
retention_time.set_lower_tolerance(row.get('lower_tolerance'))
retention_time.set_upper_tolerance(row.get('upper_tolerance'))
retention_time.set_reference(row.get('reference'))
retention_time.set_reference_type_source(row.get('reference_type_source'))
retention_time.set_upper_trace_width(row.get('upper_trace_width'))
retention_time.set_lower_trace_width(row.get('lower_trace_width'))
retention_time.set_window_width(row.get('window_width'))
retention_time.set_estimation_width(row.get('estimation_width'))
retention_time.set_window_multiplier(row.get('window_multiplier'))
return retention_time
def get_calibration(row):
calibration = Calibration({})
calibration.set_degree(row.get('degree'))
calibration.set_enabled(row.get('enabled'))
calibration.set_origin(row.get('origin'))
calibration.set_weighting(row.get('weighting'))
try:
calibration.set_normalizers(str(row.get('normalizers')).split(';'))
except ValueError:
calibration.set_normalizers([])
try:
calibration.set_responses(str(row.get('responses')).split(';'))
except ValueError:
calibration.set_responses([])
return calibration
def get_compound_method(cms, row):
for index, cm in enumerate(cms):
if row.get('compound_name') == cm.name:
return cms.pop(index)
cm = CompoundMethod({})
cm.set_name(row.get('compound_name'))
cm.set_view_order(row.get('view_order'))
cm.set_calibration(get_calibration(row))
return cm
|
mit
| -6,402,530,371,406,678,000
| 37.502857
| 103
| 0.706886
| false
| 3.601283
| false
| false
| false
|
paypal/support
|
support/socket_pool.py
|
1
|
6787
|
'''
Protocol-agnostic socket pooler.
This code is both extremely tested and hard to test.
Modify with caution :-)
"There are two ways of constructing a software design:
One way is to make it so simple that there are obviously no deficiencies,
and the other way is to make it so complicated that there are no obvious deficiencies."
-CAR Hoare, 1980 Turing Award lecture
In particular: it is tempting to attempt to auto-reconnect and re-try at this layer.
This is not possible to do correctly however, since only the protocol aware clients
know what a retry entails. (e.g. SSL handshake, reset protocol state)
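A typical round trip with the pool looks roughly like this (an illustrative
sketch; connect_somehow() is a placeholder for whatever protocol-aware code
actually creates and prepares the connection):

    pool = SocketPool(timeout=0.25)
    addr = ('10.0.0.1', 11211)
    sock = pool.acquire(addr)
    if sock is None:
        sock = connect_somehow(addr)
    # ... speak the protocol on sock ...
    pool.release(sock)  # hand it back for reuse if it is still clean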
'''
import time
import select
import socket
import gevent
import ll
ml = ll.LLogger()
# TODO: free_socks_by_addr using sets instead of lists could probably improve
# performance of cull
class SocketPool(object):
def __init__(self, timeout=0.25, max_sockets=800):
import async # breaks circular dependency
self.timeout = timeout
self.free_socks_by_addr = {}
self.sock_idle_times = {}
self.killsock = async.killsock
self.total_sockets = 0
self.max_socks_by_addr = {} # maximum sockets on an address-by-address basis
self.default_max_socks_per_addr = 50
self.max_sockets = 800
def acquire(self, addr):
        #return a free socket, if one is available; else None
try:
self.cull()
except Exception as e: # never bother caller with cull problems
ml.ld("Exception from cull: {0!r}", e)
socks = self.free_socks_by_addr.get(addr)
if socks:
sock = socks.pop()
del self.sock_idle_times[sock]
try: # sock.fileno() will throw if EBADF
ml.ld("Acquiring sock {0}/FD {1}", str(id(sock)), str(sock.fileno()))
except:
pass
return sock
return None
def release(self, sock):
#this is also a way of "registering" a socket with the pool
#basically, this says "I'm done with this socket, make it available for anyone else"
try: # sock.fileno() will throw if EBADF
ml.ld("Releasing sock {0} /FD {1}", str(id(sock)), str(sock.fileno()))
except:
pass
try:
if select.select([sock], [], [], 0)[0]:
self.killsock(sock)
return #TODO: raise exception when handed messed up socket?
#socket is readable means one of two things:
#1- left in a bad state (e.g. more data waiting -- protocol state is messed up)
#2- socket closed by remote (in which case read will return empty string)
except:
return #if socket was closed, select will raise socket.error('Bad file descriptor')
addr = sock.getpeername()
addr_socks = self.free_socks_by_addr.setdefault(addr, [])
self.total_sockets += 1
self.sock_idle_times[sock] = time.time()
addr_socks.append(sock)
self.reduce_addr_size(addr, self.max_socks_by_addr.get(addr, self.default_max_socks_per_addr))
self.reduce_size(self.max_sockets)
def reduce_size(self, size):
'''
reduce to the specified size by killing the oldest sockets
        returns a list of greenlets that can be joined on to wait for all sockets to close
'''
if self.total_sockets <= size:
return
num_culling = self.total_sockets - size
culled = sorted([(v, k) for k,v in self.sock_idle_times.iteritems()])[-num_culling:]
self.total_sockets -= num_culling
return [self._remove_sock(e[1]) for e in culled]
def reduce_addr_size(self, addr, size):
'''
reduce the number of sockets pooled on the specified address to size
        returns a list of greenlets that can be joined on to wait for all sockets to close
'''
addr_socks = self.free_socks_by_addr.get(addr, [])
if len(addr_socks) <= size:
return
num_culling = len(addr_socks) - size
culled = sorted([(self.sock_idle_times[e], e) for e in addr_socks])[-num_culling:]
self.total_sockets -= num_culling
return [self._remove_sock(e[1]) for e in culled]
def _remove_sock(self, sock):
self.free_socks_by_addr[sock.getpeername()].remove(sock)
del self.sock_idle_times[sock]
return gevent.spawn(self.killsock, sock)
def socks_pooled_for_addr(self, addr):
return len(self.free_socks_by_addr.get(addr, ()))
def cull(self):
#cull sockets which are in a bad state
culled = []
self.total_sockets = 0
#sort the living from the soon-to-be-dead
for addr in self.free_socks_by_addr:
live = []
# STEP 1 - CULL IDLE SOCKETS
for sock in self.free_socks_by_addr[addr]:
# in case the socket does not have an entry in sock_idle_times,
# assume the socket is very old and cull
if time.time() - self.sock_idle_times.get(sock, 0) > self.timeout:
try:
ml.ld("Going to Close sock {{{0}}}/FD {1}",
id(sock), sock.fileno())
except:
pass
culled.append(sock)
else:
try: # check that the underlying fileno still exists
sock.fileno()
live.append(sock)
except socket.error:
pass # if no fileno, the socket is dead and no need to close it
# STEP 2 - CULL READABLE SOCKETS
if live: # (if live is [], select.select() would error)
readable = set(select.select(live, [], [], 0)[0])
# if a socket is readable that means one of two bad things:
# 1- the socket has been closed (and sock.recv() would return '')
# 2- the server has sent some data which no client has claimed
# (which will remain in the recv buffer and mess up the next client)
live = [s for s in live if s not in readable]
culled.extend(readable)
self.free_socks_by_addr[addr] = live
self.total_sockets += len(live)
# shutdown all the culled sockets
for sock in culled:
del self.sock_idle_times[sock]
gevent.spawn(self.killsock, sock)
def __repr__(self):
return "<%s nsocks=%r/%r naddrs=%r>" % (self.__class__.__name__,
self.total_sockets,
self.max_sockets,
len(self.free_socks_by_addr))
|
bsd-3-clause
| 3,623,353,880,487,837,700
| 41.685535
| 102
| 0.573155
| false
| 3.92539
| false
| false
| false
|
JulyKikuAkita/PythonPrac
|
cs15211/RangeModule.py
|
1
|
7564
|
__source__ = 'https://leetcode.com/problems/range-module/'
# Time: O(logK) to O(K)
# Space: O(A+R), the space used by ranges.
#
# Description: Leetcode # 715. Range Module
#
# A Range Module is a module that tracks ranges of numbers.
# Your task is to design and implement the following interfaces in an efficient manner.
#
# addRange(int left, int right) Adds the half-open interval [left, right),
# tracking every real number in that interval.
# Adding an interval that partially overlaps with currently tracked numbers
# should add any numbers in the interval [left, right) that are not already tracked.
#
# queryRange(int left, int right) Returns true if and only if every real number in the interval
# [left, right) is currently being tracked.
#
# removeRange(int left, int right) Stops tracking every real number currently being tracked
# in the interval [left, right).
#
# Example 1:
#
# addRange(10, 20): null
# removeRange(14, 16): null
# queryRange(10, 14): true (Every number in [10, 14) is being tracked)
# queryRange(13, 15): false (Numbers like 14, 14.03, 14.17 in [13, 15) are not being tracked)
# queryRange(16, 17): true (The number 16 in [16, 17) is still being tracked,
# despite the remove operation)
#
# Note:
# A half open interval [left, right) denotes all real numbers left <= x < right.
# 0 < left < right < 10^9 in all calls to addRange, queryRange, removeRange.
# The total number of calls to addRange in a single test case is at most 1000.
# The total number of calls to queryRange in a single test case is at most 5000.
# The total number of calls to removeRange in a single test case is at most 1000.
#
import unittest
import bisect
# 308ms 58.44%
class RangeModule(object):
def __init__(self):
self.ranges = []
def _bounds(self, left, right):
i, j = 0, len(self.ranges) - 1
for d in (100, 10, 1):
while i + d - 1 < len(self.ranges) and self.ranges[i+d-1][1] < left:
i += d
while j >= d - 1 and self.ranges[j-d+1][0] > right:
j -= d
return i, j
def addRange(self, left, right):
i, j = self._bounds(left, right)
if i <= j:
left = min(left, self.ranges[i][0])
right = max(right, self.ranges[j][1])
self.ranges[i:j+1] = [(left, right)]
def queryRange(self, left, right):
i = bisect.bisect_left(self.ranges, (left, float('inf')))
if i: i -= 1
return (bool(self.ranges) and
self.ranges[i][0] <= left and
right <= self.ranges[i][1])
def removeRange(self, left, right):
i, j = self._bounds(left, right)
merge = []
for k in xrange(i, j+1):
if self.ranges[k][0] < left:
merge.append((self.ranges[k][0], left))
if right < self.ranges[k][1]:
merge.append((right, self.ranges[k][1]))
self.ranges[i:j+1] = merge
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/range-module/solution/
#
Approach #1: Maintain Sorted Disjoint Intervals [Accepted]
Complexity Analysis
Time Complexity: Let K be the number of elements in ranges.
addRange and removeRange operations have O(K) complexity
queryRange has O(logK) complexity
Because addRange, removeRange adds at most 1 interval at a time, you can bound these further.
For example, if there are A addRange, R removeRange, and Q queryRange number of operations respectively,
we can express our complexity as O((A+R)^2 + Q log(A+R))
Space Complexity: O(A+R), the space used by ranges.
# 121ms 89.92%
class RangeModule {
TreeSet<Interval> ranges;
public RangeModule() {
ranges = new TreeSet();
}
public void addRange(int left, int right) {
Iterator<Interval> itr = ranges.tailSet(new Interval(0, left - 1)).iterator();
while (itr.hasNext()) {
Interval iv = itr.next();
if (right < iv.left) break;
left = Math.min(left, iv.left);
right = Math.max(right, iv.right);
itr.remove();
}
ranges.add(new Interval(left, right));
}
public boolean queryRange(int left, int right) {
Interval iv = ranges.higher(new Interval(0, left));
return (iv != null && iv.left <= left && right <= iv.right);
}
public void removeRange(int left, int right) {
Iterator<Interval> itr = ranges.tailSet(new Interval(0, left)).iterator();
ArrayList<Interval> todo = new ArrayList();
while (itr.hasNext()) {
Interval iv = itr.next();
if (right < iv.left) break;
if (iv.left < left) todo.add(new Interval(iv.left, left));
if (right < iv.right) todo.add(new Interval(right, iv.right));
itr.remove();
}
for (Interval iv: todo) ranges.add(iv);
}
}
class Interval implements Comparable<Interval>{
int left;
int right;
public Interval(int left, int right){
this.left = left;
this.right = right;
}
public int compareTo(Interval that){
if (this.right == that.right) return this.left - that.left;
return this.right - that.right;
}
}
/**
* Your RangeModule object will be instantiated and called as such:
* RangeModule obj = new RangeModule();
* obj.addRange(left,right);
* boolean param_2 = obj.queryRange(left,right);
* obj.removeRange(left,right);
*/
# 136ms 78.23%
class RangeModule {
List<int[]> ranges = new ArrayList<int[]>();
public RangeModule() {
ranges.add(new int[]{-1, -1});
}
public void addRange(int left, int right) {
int l = searchFloor(left);
int r = searchFloor(right);
int[] vl = ranges.get(l);
int[] vr = ranges.get(r);
if (vr[1] < left) {
ranges.add(r + 1, new int[]{left, right});
} else {
for (int k = 0; k < r - l; k++) ranges.remove(l + 1);
if (vl[1] < left) {
ranges.add(l + 1, new int[]{left, Math.max(right, vr[1])});
} else {
ranges.remove(l);
ranges.add(l, new int[] {vl[0], Math.max(right, vr[1])});
}
}
}
public boolean queryRange(int left, int right) {
int l = searchFloor(left);
int[] r = ranges.get(l);
return (r[1] >= right);
}
public void removeRange(int left, int right) {
int l = searchFloor(left);
int r = searchFloor(right);
int[] vl = ranges.get(l);
int[] vr = ranges.get(r);
if (vr[1] <= left) return;
for (int k = 0; k < r - l; k++) ranges.remove(l + 1);
if (vr[1] > right) {
ranges.add(l + 1, new int[]{right, vr[1]});
}
if (vl[1] > left) {
ranges.remove(l);
if (vl[0] < left) {
ranges.add(l, new int[]{vl[0], left});
}
}
}
// search nearest internal starts at or before key and return the index
private int searchFloor(int key) {
int l = 0, h = ranges.size();
while (l + 1 < h) {
int m = l + (h - l) / 2;
int v = ranges.get(m)[0];
if (v < key) {
l = m;
} else if (v == key) {
l = m;
break;
} else {
h = m;
}
}
return l;
}
}
'''
|
apache-2.0
| 7,008,019,913,116,863,000
| 32.321586
| 105
| 0.569672
| false
| 3.471317
| true
| false
| false
|
jthrun/sdl_android
|
baseAndroid/make_symbolic_links.py
|
1
|
2232
|
import os
import pathlib
from pathlib import Path
import re
def has_admin():
if os.name == 'nt':
try:
# only windows users with admin privileges can read the C:\windows\temp
temp = os.listdir(os.sep.join([os.environ.get('SystemRoot', 'C:\\windows'), 'temp']))
except:
return os.environ['USERNAME'],False
else:
return os.environ['USERNAME'],True
else:
if 'SUDO_USER' in os.environ and os.geteuid() == 0:
return os.environ['SUDO_USER'],True
else:
return os.environ['USERNAME'],False
print('Script Start')
isAdmin = has_admin()
print('Running As Admin - ', isAdmin[1])
if not isAdmin[1]:
print('Can\'t run without admin privileges')
exit()
pathlist = Path('src/').glob('**/*')
# Delete the old directory
os.system('echo y | rmdir windows /s')
for path in pathlist:
path_in_str = str(path)
if os.path.isfile(path):
# check if it's a link to a file or folder
source_link_str = path_in_str
source_link_str = '..\\base\\' + source_link_str
# Remove the root folder for the actual link
print(source_link_str)
testDest = 'windows\\' + path_in_str
directory = pathlib.Path(testDest).parent
print(str(directory))
prefixDir = (re.sub(r"\\+[^\\]*", r"\\..", str(directory))+'\\..\\')[8:] # 8 to remove windows/
# Change all the directory paths into .. so that it will properly move up a folder.
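        # Worked example (illustrative): for testDest 'windows\src\a\b\Foo.java'
        # the directory is 'windows\src\a\b', prefixDir works out to
        # '..\..\..\..\', and the final mklink target becomes
        # '..\..\..\..\..\base\src\a\b\Foo.java' relative to the link's folder.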
os.system('mkdir %s' % directory)
os.system('icacls %s /grant Everyone:(f)' % directory)
# Now we need to go through each destination directory and understand that's how many ../ we have to add
if path_in_str.endswith('.java'):
print('Java file link found')
command = 'mklink "%s" "%s%s"' % (testDest, prefixDir, source_link_str)
print('Performing command %s' % command)
os.system(command)
else:
print('Directory link found')
command = 'mklink /D "%s" "%s%s"' % (testDest, prefixDir, source_link_str)
print('Performing command %s' % command)
os.system(command)
print('Script Ends')
|
bsd-3-clause
| -4,495,510,196,234,252,000
| 31.347826
| 112
| 0.584229
| false
| 3.701493
| false
| false
| false
|
CIGIHub/django-ga-puller
|
setup.py
|
1
|
1369
|
from setuptools import setup # Always prefer setuptools over distutils
import os
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-ga-puller',
version='0.1.6',
packages=['ga_puller', 'ga_puller.management', 'ga_puller.management.commands'],
include_package_data=True,
license='MIT License',
description='Django app used to pull daily Google Analytics data into your django database.',
long_description=README,
url='https://github.com/CIGIHub/django-ga-puller/',
author='Caroline Simpson',
author_email='csimpson@cigionline.org',
install_requires=[
'google-api-python-client >= 1.2',
'pycrypto >= 2.6.1',
],
setup_requires=[
'google-api-python-client >= 1.2',
'pycrypto >= 2.6.1',
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
mit
| -3,372,976,600,499,143,700
| 34.102564
| 97
| 0.626004
| false
| 3.650667
| false
| true
| false
|
ftalex/buffelgrass_mapper
|
BuffelWeb/Model.py
|
1
|
1562
|
"""
Classes for use with genshi; these are the basic data structures used to
populate the values of the templates.
"""
__author__ = "Alex Warren"
__copyright__ = "Copyright 2015, Autonomous Mapping Project"
__credits__ = ["Alex Warren", "Rachel Powers", "Thomas Schuker",
"Travis Kibler", "Jesse Odle", "Jeremy Hibbs"]
__license__ = "BSD 2"
import os
from BuffelMapper.Settings import settings
from droneapi.lib import VehicleMode, Vehicle
class Photograph(object):
def __init__(self, file_path, date_time):
self.file_path = file_path
self.date_time = date_time
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.date_time)
class Flight(object):
def __init__(self, path, date, flight_title, idx):
self.path = path
self.date = date
self.flight_title = flight_title
self.name = "%s_%s" %(date, flight_title)
self.idx = idx
def __str__(self):
return self.flight_title
class Flights(object):
def __init__(self):
self.flights = []
log_dir = settings["log_dir"]
self.log_dir = log_dir
all_date_dirs = [d for d in os.listdir(log_dir) if os.path.isdir(os.path.join(log_dir, d))]
for date_dir in os.listdir(log_dir):
full_date_dir = os.path.join(log_dir, date_dir)
if not os.path.isdir(full_date_dir):
continue
for flight_dir in os.listdir(full_date_dir):
full_flight_dir = os.path.join(full_date_dir, flight_dir)
if not os.path.isdir(full_flight_dir):
continue
self.flights.append(Flight(full_flight_dir, date_dir, flight_dir, len(self.flights)))
|
bsd-2-clause
| -7,928,987,849,017,785,000
| 28.471698
| 93
| 0.669014
| false
| 2.784314
| false
| false
| false
|
hguemar/cinder
|
cinder/volume/drivers/windows/smbfs.py
|
1
|
10774
|
# Copyright (c) 2014 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import sys
from oslo.utils import units
from oslo_config import cfg
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.drivers import smbfs
from cinder.volume.drivers.windows import remotefs
from cinder.volume.drivers.windows import vhdutils
VERSION = '1.0.0'
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.set_default('smbfs_shares_config', r'C:\OpenStack\smbfs_shares.txt')
CONF.set_default('smbfs_mount_point_base', r'C:\OpenStack\_mnt')
CONF.set_default('smbfs_default_volume_format', 'vhd')
class WindowsSmbfsDriver(smbfs.SmbfsDriver):
VERSION = VERSION
def __init__(self, *args, **kwargs):
super(WindowsSmbfsDriver, self).__init__(*args, **kwargs)
self.base = getattr(self.configuration,
'smbfs_mount_point_base',
CONF.smbfs_mount_point_base)
opts = getattr(self.configuration,
'smbfs_mount_options',
CONF.smbfs_mount_options)
self._remotefsclient = remotefs.WindowsRemoteFsClient(
'cifs', root_helper=None, smbfs_mount_point_base=self.base,
smbfs_mount_options=opts)
self.vhdutils = vhdutils.VHDUtils()
def do_setup(self, context):
self._check_os_platform()
super(WindowsSmbfsDriver, self).do_setup(context)
def _check_os_platform(self):
if sys.platform != 'win32':
_msg = _("This system platform (%s) is not supported. This "
"driver supports only Win32 platforms.") % sys.platform
raise exception.SmbfsException(_msg)
def _do_create_volume(self, volume):
volume_path = self.local_path(volume)
volume_format = self.get_volume_format(volume)
volume_size_bytes = volume['size'] * units.Gi
if os.path.exists(volume_path):
err_msg = _('File already exists at: %s') % volume_path
raise exception.InvalidVolume(err_msg)
if volume_format not in (self._DISK_FORMAT_VHD,
self._DISK_FORMAT_VHDX):
err_msg = _("Unsupported volume format: %s ") % volume_format
raise exception.InvalidVolume(err_msg)
self.vhdutils.create_dynamic_vhd(volume_path, volume_size_bytes)
def _ensure_share_mounted(self, smbfs_share):
mnt_options = {}
if self.shares.get(smbfs_share) is not None:
mnt_flags = self.shares[smbfs_share]
mnt_options = self.parse_options(mnt_flags)[1]
self._remotefsclient.mount(smbfs_share, mnt_options)
def _delete(self, path):
fileutils.delete_if_exists(path)
def _get_capacity_info(self, smbfs_share):
"""Calculate available space on the SMBFS share.
:param smbfs_share: example //172.18.194.100/var/smbfs
"""
total_size, total_available = self._remotefsclient.get_capacity_info(
smbfs_share)
total_allocated = self._get_total_allocated(smbfs_share)
return_value = [total_size, total_available, total_allocated]
LOG.info('Smb share %s Total size %s Total allocated %s'
% (smbfs_share, total_size, total_allocated))
return [float(x) for x in return_value]
def _get_total_allocated(self, smbfs_share):
elements = os.listdir(smbfs_share)
total_allocated = 0
for element in elements:
element_path = os.path.join(smbfs_share, element)
if not self._remotefsclient.is_symlink(element_path):
if "snapshot" in element:
continue
if re.search(r'\.vhdx?$', element):
total_allocated += self.vhdutils.get_vhd_size(
element_path)['VirtualSize']
continue
if os.path.isdir(element_path):
total_allocated += self._get_total_allocated(element_path)
continue
total_allocated += os.path.getsize(element_path)
return total_allocated
def _img_commit(self, snapshot_path):
self.vhdutils.merge_vhd(snapshot_path)
self._delete(snapshot_path)
def _rebase_img(self, image, backing_file, volume_format):
# Relative path names are not supported in this case.
image_dir = os.path.dirname(image)
backing_file_path = os.path.join(image_dir, backing_file)
self.vhdutils.reconnect_parent(image, backing_file_path)
def _qemu_img_info(self, path, volume_name=None):
# This code expects to deal only with relative filenames.
# As this method is needed by the upper class and qemu-img does
# not fully support vhdx images, for the moment we'll use Win32 API
# for retrieving image information.
parent_path = self.vhdutils.get_vhd_parent_path(path)
file_format = os.path.splitext(path)[1][1:].lower()
if parent_path:
backing_file_name = os.path.split(parent_path)[1].lower()
else:
backing_file_name = None
class ImageInfo(object):
def __init__(self, image, backing_file):
self.image = image
self.backing_file = backing_file
self.file_format = file_format
return ImageInfo(os.path.basename(path),
backing_file_name)
def _do_create_snapshot(self, snapshot, backing_file, new_snap_path):
backing_file_full_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
backing_file)
self.vhdutils.create_differencing_vhd(new_snap_path,
backing_file_full_path)
def _do_extend_volume(self, volume_path, size_gb):
self.vhdutils.resize_vhd(volume_path, size_gb * units.Gi)
@utils.synchronized('smbfs', external=False)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
backing_file = self.vhdutils.get_vhd_parent_path(active_file_path)
root_file_fmt = self.get_volume_format(volume)
temp_path = None
try:
if backing_file or root_file_fmt == self._DISK_FORMAT_VHDX:
temp_file_name = '%s.temp_image.%s.%s' % (
volume['id'],
image_meta['id'],
self._DISK_FORMAT_VHD)
temp_path = os.path.join(self._local_volume_dir(volume),
temp_file_name)
self.vhdutils.convert_vhd(active_file_path, temp_path)
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path,
self._DISK_FORMAT_VHD)
finally:
if temp_path:
self._delete(temp_path)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
volume_format = self.get_volume_format(volume, qemu_format=True)
image_meta = image_service.show(context, image_id)
fetch_format = volume_format
fetch_path = self.local_path(volume)
self._delete(fetch_path)
qemu_version = self.get_qemu_version()
needs_conversion = False
if (qemu_version < [1, 7] and (
volume_format == self._DISK_FORMAT_VHDX and
image_meta['disk_format'] != self._DISK_FORMAT_VHDX)):
needs_conversion = True
fetch_format = 'vpc'
temp_file_name = '%s.temp_image.%s.%s' % (
volume['id'],
image_meta['id'],
self._DISK_FORMAT_VHD)
fetch_path = os.path.join(self._local_volume_dir(volume),
temp_file_name)
image_utils.fetch_to_volume_format(
context, image_service, image_id,
fetch_path, fetch_format,
self.configuration.volume_dd_blocksize)
if needs_conversion:
self.vhdutils.convert_vhd(fetch_path, self.local_path(volume))
self._delete(fetch_path)
self.vhdutils.resize_vhd(self.local_path(volume),
volume['size'] * units.Gi)
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume."""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s" %
{'snap': snapshot['id'],
'vol': volume['id'],
'size': snapshot['volume_size']})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
vol_dir = self._local_volume_dir(snapshot['volume'])
forward_file = snap_info[snapshot['id']]
forward_path = os.path.join(vol_dir, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path)
snapshot_path = os.path.join(vol_dir, img_info.backing_file)
volume_path = self.local_path(volume)
self._delete(volume_path)
self.vhdutils.convert_vhd(snapshot_path,
volume_path)
self.vhdutils.resize_vhd(volume_path, volume_size * units.Gi)
|
apache-2.0
| 1,899,625,683,295,123,000
| 39.201493
| 79
| 0.591981
| false
| 3.906454
| false
| false
| false
|
hellobond/python-smartypants
|
lib/smartypants.py
|
1
|
34216
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
==============
smartypants.py
==============
----------------------------
SmartyPants ported to Python
----------------------------
Ported by `Chad Miller`_
Copyright (c) 2004, 2007 Chad Miller
original `SmartyPants`_ by `John Gruber`_
Copyright (c) 2003 John Gruber
Synopsis
========
A smart-quotes plugin for Pyblosxom_.
The original "SmartyPants" is a free web publishing plug-in for Movable Type,
Blosxom, and BBEdit that easily translates plain ASCII punctuation characters
into "smart" typographic punctuation HTML entities.
This software, *smartypants.py*, endeavours to be a functional port of
SmartyPants to Python, for use with Pyblosxom_.
Description
===========
SmartyPants can perform the following transformations:
- Straight quotes ( " and ' ) into "curly" quote HTML entities
- Backticks-style quotes (\`\`like this'') into "curly" quote HTML entities
- Dashes (``--`` and ``---``) into en- and em-dash entities
- Three consecutive dots (``...`` or ``. . .``) into an ellipsis entity
This means you can write, edit, and save your posts using plain old
ASCII straight quotes, plain dashes, and plain dots, but your published
posts (and final HTML output) will appear with smart quotes, em-dashes,
and proper ellipses.
SmartyPants does not modify characters within ``<pre>``, ``<code>``, ``<tt>``,
``<kbd>``, ``<math>`` or ``<script>`` tag blocks. Typically, these tags are
used to display text where smart quotes and other "smart punctuation" would
not be appropriate, such as source code or example markup.
Backslash Escapes
=================
By default escapes are not processed. To process escapes, the
`process_escapes=True` keyword argument must be passed. See below for a
description of what this does.
If you need to use literal straight quotes (or plain hyphens and
periods), SmartyPants accepts the following backslash escape sequences
to force non-smart punctuation. It does so by transforming the escape
sequence into a decimal-encoded HTML entity:
(FIXME: table here.)
.. comment It sucks that there's a disconnect between the visual layout and table markup when special characters are involved.
.. comment ====== ===== =========
.. comment Escape Value Character
.. comment ====== ===== =========
.. comment \\\\\\\\ \ \\\\
.. comment \\\\" " "
.. comment \\\\' ' '
.. comment \\\\. . .
.. comment \\\\- - \-
.. comment \\\\` ` \`
.. comment ====== ===== =========
This is useful, for example, when you want to use straight quotes as
foot and inch marks: 6'2" tall; a 17" iMac.
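For example, a minimal sketch (assuming the module is importable as
``smartypants``; the input string is invented for illustration)::

    from smartypants import smartyPants

    # The escaped quote is emitted as a decimal entity (a straight quote)
    # rather than being curled into a closing quote:
    smartyPants(r'A 17\" iMac', process_escapes=True)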
Options
=======
For Pyblosxom users, the ``smartypants_attributes`` attribute is where you
specify configuration options.
Numeric values are the easiest way to configure SmartyPants' behavior:
"0"
Suppress all transformations. (Do nothing.)
"1"
Performs default SmartyPants transformations: quotes (including
\`\`backticks'' -style), em-dashes, and ellipses. "``--``" (dash dash)
is used to signify an em-dash; there is no support for en-dashes.
"2"
Same as smarty_pants="1", except that it uses the old-school typewriter
shorthand for dashes: "``--``" (dash dash) for en-dashes, "``---``"
    (dash dash dash) for em-dashes.
"3"
Same as smarty_pants="2", but inverts the shorthand for dashes:
"``--``" (dash dash) for em-dashes, and "``---``" (dash dash dash) for
en-dashes.
"-1"
Stupefy mode. Reverses the SmartyPants transformation process, turning
the HTML entities produced by SmartyPants into their ASCII equivalents.
E.g. "“" is turned into a simple double-quote ("), "—" is
turned into two dashes, etc.
Additionally, shorthands that are specific to Bond can be used to configure
the behavior of SmartyPants:
"B1"
Performs the following transformations: single (``'``) and double (``"``)
    quotes. There is no support for backticks, dashes (en- and em-), or
ellipses.
The following single-character attribute values can be combined to toggle
individual transformations from within the smarty_pants attribute. For
example, to educate normal quotes and em-dashes, but not ellipses or
\`\`backticks'' -style quotes:
``py['smartypants_attributes'] = "qd"``
"q"
Educates normal quote characters: (") and (').
"b"
Educates \`\`backticks'' -style double quotes.
"B"
Educates \`\`backticks'' -style double quotes and \`single' quotes.
"d"
Educates em-dashes.
"D"
Educates em-dashes and en-dashes, using old-school typewriter shorthand:
(dash dash) for en-dashes, (dash dash dash) for em-dashes.
"i"
Educates em-dashes and en-dashes, using inverted old-school typewriter
shorthand: (dash dash) for em-dashes, (dash dash dash) for en-dashes.
"e"
Educates ellipses.
"w"
    Translates any instance of ``&quot;`` into a normal double-quote character.
This should be of no interest to most people, but of particular interest
to anyone who writes their posts using Dreamweaver, as Dreamweaver
inexplicably uses this entity to represent a literal double-quote
character. SmartyPants only educates normal quotes, not entities (because
ordinarily, entities are used for the explicit purpose of representing the
specific character they represent). The "w" option must be used in
conjunction with one (or both) of the other quote options ("q" or "b").
Thus, if you wish to apply all SmartyPants transformations (quotes, en-
    and em-dashes, and ellipses) and also translate ``&quot;`` entities into
    regular quotes so SmartyPants can educate them, you should pass the
    following to the smarty_pants attribute: ``"qDew"``.
The ``smartypants_forbidden_flavours`` list contains pyblosxom flavours for
which no Smarty Pants rendering will occur.
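As an illustrative sketch (attribute strings taken from the legend above,
sample text invented)::

    smartyPants('"Hello," she said -- twice...', "1")   # educate everything
    smartyPants('"Hello," she said -- twice...', "q")   # quotes only
    smartyPants('"Hello," she said -- twice...', "0")   # return text unchanged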
Caveats
=======
Why You Might Not Want to Use Smart Quotes in Your Weblog
---------------------------------------------------------
For one thing, you might not care.
Most normal, mentally stable individuals do not take notice of proper
typographic punctuation. Many design and typography nerds, however, break
out in a nasty rash when they encounter, say, a restaurant sign that uses
a straight apostrophe to spell "Joe's".
If you're the sort of person who just doesn't care, you might well want to
continue not caring. Using straight quotes -- and sticking to the 7-bit
ASCII character set in general -- is certainly a simpler way to live.
Even if you *do* care about accurate typography, you still might want to
think twice before educating the quote characters in your weblog. One side
effect of publishing curly quote HTML entities is that it makes your
weblog a bit harder for others to quote from using copy-and-paste. What
happens is that when someone copies text from your blog, the copied text
contains the 8-bit curly quote characters (as well as the 8-bit characters
for em-dashes and ellipses, if you use these options). These characters
are not standard across different text encoding methods, which is why they
need to be encoded as HTML entities.
People copying text from your weblog, however, may not notice that you're
using curly quotes, and they'll go ahead and paste the unencoded 8-bit
characters copied from their browser into an email message or their own
weblog. When pasted as raw "smart quotes", these characters are likely to
get mangled beyond recognition.
That said, my own opinion is that any decent text editor or email client
makes it easy to stupefy smart quote characters into their 7-bit
equivalents, and I don't consider it my problem if you're using an
indecent text editor or email client.
Algorithmic Shortcomings
------------------------
One situation in which quotes will get curled the wrong way is when
apostrophes are used at the start of leading contractions. For example:
``'Twas the night before Christmas.``
In the case above, SmartyPants will turn the apostrophe into an opening
single-quote, when in fact it should be a closing one. I don't think
this problem can be solved in the general case -- every word processor
I've tried gets this wrong as well. In such cases, it's best to use the
proper HTML entity for closing single-quotes (``’``) by hand.
Bugs
====
To file bug reports or feature requests (other than topics listed in the
Caveats section above) please send email to: mailto:smartypantspy@chad.org
If the bug involves quotes being curled the wrong way, please send example
text to illustrate.
To Do list
----------
- Provide a function for use within templates to quote anything at all.
Version History
===============
1.5_2.1: Fri, 24 Oct 2014, 18:53:25 -0400
- Added option to process escapes. By default backslash escapes will
not be processed.
1.5_2.0: Thu, 04 Sep 2014 12:31:22 -0400
- Added unicode output option and added new attributes (Bond usage cases).
Note that version number jumps to reflect fork implementations.
1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400
- Fixed bug where blocks of precious unalterable text was instead
interpreted. Thanks to Le Roux and Dirk van Oosterbosch.
1.5_1.5: Sat, 13 Aug 2005 15:50:24 -0400
- Fix bogus magical quotation when there is no hint that the
user wants it, e.g., in "21st century". Thanks to Nathan Hamblen.
- Be smarter about quotes before terminating numbers in an en-dash'ed
range.
1.5_1.4: Thu, 10 Feb 2005 20:24:36 -0500
- Fix a date-processing bug, as reported by jacob childress.
- Begin a test-suite for ensuring correct output.
- Removed import of "string", since I didn't really need it.
  (This was my first ever Python program. Sue me!)
1.5_1.3: Wed, 15 Sep 2004 18:25:58 -0400
- Abort processing if the flavour is in forbidden-list. Default of
[ "rss" ] (Idea of Wolfgang SCHNERRING.)
- Remove stray virgules from en-dashes. Patch by Wolfgang SCHNERRING.
1.5_1.2: Mon, 24 May 2004 08:14:54 -0400
- Some single quotes weren't replaced properly. Diff-tesuji played
by Benjamin GEIGER.
1.5_1.1: Sun, 14 Mar 2004 14:38:28 -0500
- Support upcoming pyblosxom 0.9 plugin verification feature.
1.5_1.0: Tue, 09 Mar 2004 08:08:35 -0500
- Initial release
Version Information
-------------------
Version numbers will track the SmartyPants_ version numbers, with the addition
of an underscore and the smartypants.py version on the end.
New versions will be available at `http://wiki.chad.org/SmartyPantsPy`_
.. _http://wiki.chad.org/SmartyPantsPy: http://wiki.chad.org/SmartyPantsPy
Authors
=======
`John Gruber`_ did all of the hard work of writing this software in Perl for
`Movable Type`_ and almost all of this useful documentation. `Chad Miller`_
ported it to Python to use with Pyblosxom_.
Additional Credits
==================
Portions of the SmartyPants original work are based on Brad Choate's nifty
MTRegex plug-in. `Brad Choate`_ also contributed a few bits of source code to
this plug-in. Brad Choate is a fine hacker indeed.
`Jeremy Hedley`_ and `Charles Wiltgen`_ deserve mention for exemplary beta
testing of the original SmartyPants.
`Rael Dornfest`_ ported SmartyPants to Blosxom.
.. _Brad Choate: http://bradchoate.com/
.. _Jeremy Hedley: http://antipixel.com/
.. _Charles Wiltgen: http://playbacktime.com/
.. _Rael Dornfest: http://raelity.org/
Copyright and License
=====================
SmartyPants_ license::
Copyright (c) 2003 John Gruber
(http://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license::
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _John Gruber: http://daringfireball.net/
.. _Chad Miller: http://web.chad.org/
.. _Pyblosxom: http://roughingit.subtlehints.net/pyblosxom
.. _SmartyPants: http://daringfireball.net/projects/smartypants/
.. _Movable Type: http://www.movabletype.org/
"""
# TODO: Add usage examples
default_smartypants_attr = "B1" # Like a BOSS!
import re
import htmlentitydefs
tags_to_skip_regex = re.compile(r"<(/)?(pre|code|tt|kbd|script|math)[^>]*>", re.I)
def verify_installation(request):
return 1
# assert the plugin is functional
def cb_story(args):
global default_smartypants_attr
try:
forbidden_flavours = args["entry"]["smartypants_forbidden_flavours"]
except KeyError:
forbidden_flavours = [ "rss" ]
try:
attributes = args["entry"]["smartypants_attributes"]
except KeyError:
attributes = default_smartypants_attr
if attributes is None:
attributes = default_smartypants_attr
entryData = args["entry"].getData()
try:
if args["request"]["flavour"] in forbidden_flavours:
return
except KeyError:
if "<" in args["entry"]["body"][0:15]: # sniff the stream
return # abort if it looks like escaped HTML. FIXME
# FIXME: make these configurable, perhaps?
args["entry"]["body"] = smartyPants(entryData, attributes)
args["entry"]["title"] = smartyPants(args["entry"]["title"], attributes)
### interal functions below here
def smartyPants(text, attr=default_smartypants_attr, **kwargs):
    """Translate plain ASCII punctuation in `text` into "smart" typographic
    punctuation, according to the attribute string `attr` (see the module
    docstring for the supported attribute values).

    Keyword Parameters
    ------------------
    process_escapes : bool
        If True, backslash escape sequences are processed first.
    unicode : bool
        Convert to unicode. If True a unicode string is returned (if necessary)
        and if False an HTML encoded string is returned.
    """
    convert_quot = False # should we translate &quot; entities into normal quotes?
# Parse attributes:
# 0 : do nothing
# 1 : set all
# 2 : set all, using old school en- and em- dash shortcuts
# 3 : set all, using inverted old school en and em- dash shortcuts
#
# q : quotes
# b : backtick quotes (``double'' only)
# B : backtick quotes (``double'' and `single')
# d : dashes
# D : old school dashes
# i : inverted old school dashes
# e : ellipses
    # w : convert &quot; entities to " for Dreamweaver users
skipped_tag_stack = []
do_dashes = "0"
do_backticks = "0"
do_quotes = "0"
do_ellipses = "0"
do_stupefy = "0"
if attr == "0":
# Do nothing.
return text
elif attr == "1":
do_quotes = "1"
do_backticks = "1"
do_dashes = "1"
do_ellipses = "1"
elif attr == "2":
# Do everything, turn all options on, use old school dash shorthand.
do_quotes = "1"
do_backticks = "1"
do_dashes = "2"
do_ellipses = "1"
elif attr == "3":
# Do everything, turn all options on, use inverted old school dash shorthand.
do_quotes = "1"
do_backticks = "1"
do_dashes = "3"
do_ellipses = "1"
elif attr == "-1":
# Special "stupefy" mode.
do_stupefy = "1"
# BOND Shorthands
elif attr == "B1":
do_quotes = "1"
else:
for c in attr:
if c == "q": do_quotes = "1"
elif c == "b": do_backticks = "1"
elif c == "B": do_backticks = "2"
elif c == "d": do_dashes = "1"
elif c == "D": do_dashes = "2"
elif c == "i": do_dashes = "3"
elif c == "e": do_ellipses = "1"
elif c == "w": convert_quot = "1"
else:
pass
# ignore unknown option
tokens = _tokenize(text)
result = []
in_pre = False
prev_token_last_char = ""
# This is a cheat, used to get some context
# for one-character tokens that consist of
# just a quote char. What we do is remember
# the last character of the previous text
# token, to use as context to curl single-
# character quote tokens correctly.
for cur_token in tokens:
if cur_token[0] == "tag":
# Don't mess with quotes inside some tags. This does not handle self <closing/> tags!
result.append(cur_token[1])
skip_match = tags_to_skip_regex.match(cur_token[1])
if skip_match is not None:
if not skip_match.group(1):
skipped_tag_stack.append(skip_match.group(2).lower())
in_pre = True
else:
if len(skipped_tag_stack) > 0:
if skip_match.group(2).lower() == skipped_tag_stack[-1]:
skipped_tag_stack.pop()
else:
pass
# This close doesn't match the open. This isn't XHTML. We should barf here.
if len(skipped_tag_stack) == 0:
in_pre = False
else:
t = cur_token[1]
last_char = t[-1:] # Remember last char of this token before processing.
if not in_pre:
oldstr = t
if kwargs.get('process_escapes', False): # only process escapes if requested.
t = processEscapes(t)
if convert_quot != "0":
                    t = re.sub('&quot;', '"', t)
if do_dashes != "0":
if do_dashes == "1":
t = educateDashes(t)
if do_dashes == "2":
t = educateDashesOldSchool(t)
if do_dashes == "3":
t = educateDashesOldSchoolInverted(t)
if do_ellipses != "0":
t = educateEllipses(t)
# Note: backticks need to be processed before quotes.
if do_backticks != "0":
t = educateBackticks(t)
if do_backticks == "2":
t = educateSingleBackticks(t)
if do_quotes != "0":
if t == "'":
# Special case: single-character ' token
if re.match("\S", prev_token_last_char):
t = "’"
else:
t = "‘"
elif t == '"':
# Special case: single-character " token
if re.match("\S", prev_token_last_char):
t = "”"
else:
t = "“"
else:
# Normal case:
t = educateQuotes(t)
if do_stupefy == "1":
t = stupefyEntities(t)
prev_token_last_char = last_char
result.append(t)
output_text = "".join(result)
if kwargs.get('unicode'):
output_text = unescape_html(output_text)
return output_text
def educateQuotes(str):
"""
Parameter: String.
Returns: The string, with "educated" curly quote HTML entities.
Example input: "Isn't this fun?"
Example output: “Isn’t this fun?”
"""
oldstr = str
punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
str = re.sub(r"""^'(?=%s\\B)""" % (punct_class,), r"""’""", str)
str = re.sub(r"""^"(?=%s\\B)""" % (punct_class,), r"""”""", str)
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
str = re.sub(r""""'(?=\w)""", """“‘""", str)
str = re.sub(r"""'"(?=\w)""", """‘“""", str)
# Special case for decade abbreviations (the '80s):
str = re.sub(r"""\b'(?=\d{2}s)""", r"""’""", str)
close_class = r"""[^\ \t\r\n\[\{\(\-]"""
dec_dashes = r"""–|—"""
# Get most opening single quotes:
opening_single_quotes_regex = re.compile(r"""
(
\s | # a whitespace char, or
                &nbsp;      |   # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
' # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
str = opening_single_quotes_regex.sub(r"""\1‘""", str)
closing_single_quotes_regex = re.compile(r"""
(%s)
'
(?!\s | s\b | \d)
""" % (close_class,), re.VERBOSE)
str = closing_single_quotes_regex.sub(r"""\1’""", str)
closing_single_quotes_regex = re.compile(r"""
(%s)
'
(\s | s\b)
""" % (close_class,), re.VERBOSE)
str = closing_single_quotes_regex.sub(r"""\1’\2""", str)
# Any remaining single quotes should be opening ones:
str = re.sub(r"""'""", r"""‘""", str)
# Get most opening double quotes:
opening_double_quotes_regex = re.compile(r"""
(
\s | # a whitespace char, or
                &nbsp;      |   # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
" # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
str = opening_double_quotes_regex.sub(r"""\1“""", str)
# Double closing quotes:
closing_double_quotes_regex = re.compile(r"""
#(%s)? # character that indicates the quote should be closing
"
(?=\s)
""" % (close_class,), re.VERBOSE)
str = closing_double_quotes_regex.sub(r"""”""", str)
closing_double_quotes_regex = re.compile(r"""
(%s) # character that indicates the quote should be closing
"
""" % (close_class,), re.VERBOSE)
str = closing_double_quotes_regex.sub(r"""\1”""", str)
# Any remaining quotes should be opening ones.
str = re.sub(r'"', r"""“""", str)
return str
def educateBackticks(str):
"""
Parameter: String.
Returns: The string, with ``backticks'' -style double quotes
translated into HTML curly quote entities.
Example input: ``Isn't this fun?''
Example output: “Isn't this fun?”
"""
str = re.sub(r"""``""", r"""“""", str)
str = re.sub(r"""''""", r"""”""", str)
return str
def educateSingleBackticks(str):
"""
Parameter: String.
Returns: The string, with `backticks' -style single quotes
translated into HTML curly quote entities.
Example input: `Isn't this fun?'
Example output: ‘Isn’t this fun?’
"""
str = re.sub(r"""`""", r"""‘""", str)
str = re.sub(r"""'""", r"""’""", str)
return str
def educateDashes(str):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an em-dash HTML entity.
"""
str = re.sub(r"""---""", r"""–""", str) # en (yes, backwards)
str = re.sub(r"""--""", r"""—""", str) # em (yes, backwards)
return str
def educateDashesOldSchool(str):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an en-dash HTML entity, and each "---" translated to
an em-dash HTML entity.
"""
str = re.sub(r"""---""", r"""—""", str) # em (yes, backwards)
str = re.sub(r"""--""", r"""–""", str) # en (yes, backwards)
return str
def educateDashesOldSchoolInverted(str):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an em-dash HTML entity, and each "---" translated to
an en-dash HTML entity. Two reasons why: First, unlike the
en- and em-dash syntax supported by
EducateDashesOldSchool(), it's compatible with existing
entries written before SmartyPants 1.1, back when "--" was
only used for em-dashes. Second, em-dashes are more
common than en-dashes, and so it sort of makes sense that
the shortcut should be shorter to type. (Thanks to Aaron
Swartz for the idea.)
"""
str = re.sub(r"""---""", r"""–""", str) # em
str = re.sub(r"""--""", r"""—""", str) # en
return str
def educateEllipses(str):
"""
Parameter: String.
Returns: The string, with each instance of "..." translated to
an ellipsis HTML entity.
Example input: Huh...?
Example output: Huh…?
"""
str = re.sub(r"""\.\.\.""", r"""…""", str)
str = re.sub(r"""\. \. \.""", r"""…""", str)
return str
def stupefyEntities(str):
"""
Parameter: String.
Returns: The string, with each SmartyPants HTML entity translated to
its ASCII counterpart.
Example input: “Hello — world.”
Example output: "Hello -- world."
"""
str = re.sub(r"""–""", r"""-""", str) # en-dash
str = re.sub(r"""—""", r"""--""", str) # em-dash
str = re.sub(r"""‘""", r"""'""", str) # open single quote
str = re.sub(r"""’""", r"""'""", str) # close single quote
str = re.sub(r"""“""", r'''"''', str) # open double quote
str = re.sub(r"""”""", r'''"''', str) # close double quote
str = re.sub(r"""…""", r"""...""", str)# ellipsis
return str
def processEscapes(str):
r"""
Parameter: String.
    Returns:    The string, after processing the following backslash
escape sequences. This is useful if you want to force a "dumb"
quote or other character to appear.
Escape Value
------ -----
\\ \
\" "
\' '
\. .
\- -
\` `
"""
    str = re.sub(r"""\\\\""", r"""&#92;""", str)
    str = re.sub(r'''\\"''', r"""&#34;""", str)
    str = re.sub(r"""\\'""", r"""&#39;""", str)
    str = re.sub(r"""\\\.""", r"""&#46;""", str)
    str = re.sub(r"""\\-""", r"""&#45;""", str)
    str = re.sub(r"""\\`""", r"""&#96;""", str)
return str
def _tokenize(str):
"""
Parameter: String containing HTML markup.
Returns: Reference to an array of the tokens comprising the input
string. Each token is either a tag (possibly with nested,
tags contained therein, such as <a href="<MTFoo>">, or a
run of text between tags. Each element of the array is a
two-element array; the first is either 'tag' or 'text';
the second is the actual value.
Based on the _tokenize() subroutine from Brad Choate's MTRegex plugin.
<http://www.bradchoate.com/past/mtregex.php>
"""
pos = 0
length = len(str)
tokens = []
depth = 6
nested_tags = "|".join(['(?:<(?:[^<>]',] * depth) + (')*>)' * depth)
#match = r"""(?: <! ( -- .*? -- \s* )+ > ) | # comments
# (?: <\? .*? \?> ) | # directives
# %s # nested tags """ % (nested_tags,)
tag_soup = re.compile(r"""([^<]*)(<[^>]*>)""")
token_match = tag_soup.search(str)
previous_end = 0
while token_match is not None:
if token_match.group(1):
tokens.append(['text', token_match.group(1)])
tokens.append(['tag', token_match.group(2)])
previous_end = token_match.end()
token_match = tag_soup.search(str, token_match.end())
if previous_end < len(str):
tokens.append(['text', str[previous_end:]])
return tokens
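# Illustrative example (input invented): _tokenize('<p>Hi "there"</p>') returns
#   [['tag', '<p>'], ['text', 'Hi "there"'], ['tag', '</p>']]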
### New Functions
def unescape_html(text):
"""Replaces HTML/XML character references in a string with unicode
encodings.
SRC: http://effbot.org/zone/re-sub.htm#unescape-html
`October 28, 2006 | Fredrik Lundh`
:param text: HTML/XML encoded source text
:rtype: string or unicode (if necessary)
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
if __name__ == "__main__":
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_string
docstring_html = publish_string(__doc__, writer_name='html')
print docstring_html
# Unit test output goes out stderr. No worries.
import unittest
sp = smartyPants
class TestSmartypantsAllAttributes(unittest.TestCase):
# the default attribute is "1", which means "all".
def test_dates(self):
self.assertEqual(sp("1440-80's"), "1440-80’s")
self.assertEqual(sp("1440-'80s"), "1440-‘80s")
self.assertEqual(sp("1440---'80s"), "1440–‘80s")
self.assertEqual(sp("1960s"), "1960s") # no effect.
self.assertEqual(sp("1960's"), "1960’s")
self.assertEqual(sp("one two '60s"), "one two ‘60s")
self.assertEqual(sp("'60s"), "‘60s")
def test_skip_tags(self):
self.assertEqual(
sp("""<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>"""),
"""<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>""")
self.assertEqual(
sp("""<p>He said "Let's write some code." This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>"""),
"""<p>He said “Let’s write some code.” This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>""")
def test_ordinal_numbers(self):
self.assertEqual(sp("21st century"), "21st century") # no effect.
self.assertEqual(sp("3rd"), "3rd") # no effect.
def test_educated_quotes(self):
self.assertEqual(sp('''"Isn't this fun?"'''), '''“Isn’t this fun?”''')
unittest.main()
__author__ = "Anthony O'Brien <anthony@bondgifts.com>"
__version__ = "1.5_2.0"
__url__ = "https://github.com/bondgifts/python-smartypants"
__description__ = "Smart-quotes, smart-ellipses, and smart-dashes for unicode and HTML/XML usage."
|
bsd-3-clause
| 1,224,853,548,445,480,000
| 34.093333
| 209
| 0.594868
| false
| 3.684687
| false
| false
| false
|
hueyyeng/AssetsBrowser
|
ui/window/ui_main.py
|
1
|
12124
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'K:\Library\Python\AssetsBrowser\ui\window\main.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setWindowModality(QtCore.Qt.NonModal)
MainWindow.resize(851, 603)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(800, 600))
MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setPointSize(8)
MainWindow.setFont(font)
MainWindow.setDocumentMode(False)
MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralWidget.sizePolicy().hasHeightForWidth())
self.centralWidget.setSizePolicy(sizePolicy)
self.centralWidget.setMinimumSize(QtCore.QSize(800, 550))
self.centralWidget.setObjectName("centralWidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralWidget)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(self.centralWidget)
self.splitter.setEnabled(True)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setChildrenCollapsible(False)
self.splitter.setObjectName("splitter")
self.actionWidget = QtWidgets.QWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.actionWidget.sizePolicy().hasHeightForWidth())
self.actionWidget.setSizePolicy(sizePolicy)
self.actionWidget.setMinimumSize(QtCore.QSize(230, 0))
self.actionWidget.setMaximumSize(QtCore.QSize(230, 16777215))
self.actionWidget.setObjectName("actionWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.actionWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.labelProject = QtWidgets.QLabel(self.actionWidget)
self.labelProject.setObjectName("labelProject")
self.verticalLayout.addWidget(self.labelProject)
self.projectComboBox = QtWidgets.QComboBox(self.actionWidget)
self.projectComboBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.projectComboBox.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)
self.projectComboBox.setFrame(True)
self.projectComboBox.setObjectName("projectComboBox")
self.verticalLayout.addWidget(self.projectComboBox)
self.pushBtnNewAsset = QtWidgets.QPushButton(self.actionWidget)
self.pushBtnNewAsset.setObjectName("pushBtnNewAsset")
self.verticalLayout.addWidget(self.pushBtnNewAsset)
self.pushBtnNewAssetItem = QtWidgets.QPushButton(self.actionWidget)
self.pushBtnNewAssetItem.setObjectName("pushBtnNewAssetItem")
self.verticalLayout.addWidget(self.pushBtnNewAssetItem)
self.pushBtnManageFormat = QtWidgets.QPushButton(self.actionWidget)
self.pushBtnManageFormat.setObjectName("pushBtnManageFormat")
self.verticalLayout.addWidget(self.pushBtnManageFormat)
self.debugCheckBox = QtWidgets.QCheckBox(self.actionWidget)
self.debugCheckBox.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.debugCheckBox.sizePolicy().hasHeightForWidth())
self.debugCheckBox.setSizePolicy(sizePolicy)
self.debugCheckBox.setObjectName("debugCheckBox")
self.verticalLayout.addWidget(self.debugCheckBox)
self.textEdit = QtWidgets.QTextEdit(self.actionWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
self.textEdit.setObjectName("textEdit")
self.verticalLayout.addWidget(self.textEdit)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
self.verticalLayout.addItem(spacerItem)
self.labelCredits = QtWidgets.QLabel(self.actionWidget)
self.labelCredits.setTextFormat(QtCore.Qt.AutoText)
self.labelCredits.setOpenExternalLinks(True)
self.labelCredits.setObjectName("labelCredits")
self.verticalLayout.addWidget(self.labelCredits)
self.labelCredits.raise_()
self.debugCheckBox.raise_()
self.pushBtnNewAsset.raise_()
self.labelProject.raise_()
self.textEdit.raise_()
self.projectComboBox.raise_()
self.pushBtnNewAssetItem.raise_()
self.pushBtnManageFormat.raise_()
self.tabWidget = QtWidgets.QTabWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setObjectName("tabWidget")
self.tabHelp = QtWidgets.QWidget()
self.tabHelp.setObjectName("tabHelp")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.tabHelp)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.frameHelp = QtWidgets.QFrame(self.tabHelp)
self.frameHelp.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frameHelp.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frameHelp.setObjectName("frameHelp")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frameHelp)
self.horizontalLayout.setObjectName("horizontalLayout")
self.textBrowserHelp = QtWidgets.QTextBrowser(self.frameHelp)
self.textBrowserHelp.setOpenExternalLinks(True)
self.textBrowserHelp.setObjectName("textBrowserHelp")
self.horizontalLayout.addWidget(self.textBrowserHelp)
self.horizontalLayout_3.addWidget(self.frameHelp)
self.tabWidget.addTab(self.tabHelp, "")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralWidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 851, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuView = QtWidgets.QMenu(self.menubar)
self.menuView.setObjectName("menuView")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
self.menuSettings = QtWidgets.QMenu(self.menubar)
self.menuSettings.setObjectName("menuSettings")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setEnabled(True)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionAlwaysOnTop = QtWidgets.QAction(MainWindow)
self.actionAlwaysOnTop.setCheckable(True)
self.actionAlwaysOnTop.setObjectName("actionAlwaysOnTop")
self.actionPreferences = QtWidgets.QAction(MainWindow)
self.actionPreferences.setObjectName("actionPreferences")
self.actionNewAsset = QtWidgets.QAction(MainWindow)
self.actionNewAsset.setObjectName("actionNewAsset")
self.actionNewAssetItem = QtWidgets.QAction(MainWindow)
self.actionNewAssetItem.setObjectName("actionNewAssetItem")
self.actionApplicationsList = QtWidgets.QAction(MainWindow)
self.actionApplicationsList.setObjectName("actionApplicationsList")
self.actionManageFormat = QtWidgets.QAction(MainWindow)
self.actionManageFormat.setObjectName("actionManageFormat")
self.menuFile.addAction(self.actionNewAsset)
self.menuFile.addAction(self.actionNewAssetItem)
self.menuFile.addAction(self.actionManageFormat)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuView.addAction(self.actionAlwaysOnTop)
self.menuHelp.addAction(self.actionAbout)
self.menuSettings.addAction(self.actionPreferences)
self.menuSettings.addAction(self.actionApplicationsList)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuSettings.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Assets Browser"))
self.labelProject.setText(_translate("MainWindow", "Project:"))
self.pushBtnNewAsset.setText(_translate("MainWindow", "Create New Asset"))
self.pushBtnNewAssetItem.setText(_translate("MainWindow", "Create New Asset Item"))
self.pushBtnManageFormat.setText(_translate("MainWindow", "Manage Asset Item Format"))
self.debugCheckBox.setText(_translate("MainWindow", "Show Debug Panel"))
self.labelCredits.setText(_translate("MainWindow", "<html><head/><body><p>Huey Yeng © 2017-2021</p><p><a href=\"https://taukeke.com\"><span style=\" text-decoration: underline; color:#0000ff;\">taukeke.com<br/></span></a><a href=\"https://github.com/hueyyeng/AssetsBrowser\"><span style=\" text-decoration: underline; color:#0000ff;\">Assets Browser@ GitHub</span></a></p></body></html>"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabHelp), _translate("MainWindow", "Help"))
self.menuFile.setTitle(_translate("MainWindow", "&File"))
self.menuView.setTitle(_translate("MainWindow", "&View"))
self.menuHelp.setTitle(_translate("MainWindow", "&Help"))
self.menuSettings.setTitle(_translate("MainWindow", "&Settings"))
self.actionQuit.setText(_translate("MainWindow", "&Quit"))
self.actionAbout.setText(_translate("MainWindow", "&About Assets Browser"))
self.actionAlwaysOnTop.setText(_translate("MainWindow", "Always on &Top"))
self.actionPreferences.setText(_translate("MainWindow", "&Preferences..."))
self.actionNewAsset.setText(_translate("MainWindow", "&New Asset"))
self.actionNewAssetItem.setText(_translate("MainWindow", "New &Asset Item"))
self.actionApplicationsList.setText(_translate("MainWindow", "&Applications List..."))
self.actionManageFormat.setText(_translate("MainWindow", "&Manage Asset Item Format"))
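# Illustrative usage sketch (not part of the generated module, which should not
# be edited by hand): the generated class is typically applied to a plain
# QMainWindow instance via setupUi().
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())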
|
mit
| -3,948,126,092,516,333,000
| 58.136585
| 398
| 0.73043
| false
| 4.23733
| false
| false
| false
|
vhaupert/mitmproxy
|
mitmproxy/stateobject.py
|
1
|
3417
|
import json
import typing
from mitmproxy.coretypes import serializable
from mitmproxy.utils import typecheck
class StateObject(serializable.Serializable):
"""
An object with serializable state.
State attributes can either be serializable types(str, tuple, bool, ...)
or StateObject instances themselves.
"""
_stateobject_attributes: typing.ClassVar[typing.MutableMapping[str, typing.Any]]
"""
An attribute-name -> class-or-type dict containing all attributes that
should be serialized. If the attribute is a class, it must implement the
Serializable protocol.
"""
def get_state(self):
"""
Retrieve object state.
"""
state = {}
for attr, cls in self._stateobject_attributes.items():
val = getattr(self, attr)
state[attr] = get_state(cls, val)
return state
def set_state(self, state):
"""
Load object state from data returned by a get_state call.
"""
state = state.copy()
for attr, cls in self._stateobject_attributes.items():
val = state.pop(attr)
if val is None:
setattr(self, attr, val)
else:
curr = getattr(self, attr, None)
if hasattr(curr, "set_state"):
curr.set_state(val)
else:
setattr(self, attr, make_object(cls, val))
if state:
raise RuntimeWarning("Unexpected State in __setstate__: {}".format(state))
def _process(typeinfo: typecheck.Type, val: typing.Any, make: bool) -> typing.Any:
if val is None:
return None
elif make and hasattr(typeinfo, "from_state"):
return typeinfo.from_state(val)
elif not make and hasattr(val, "get_state"):
return val.get_state()
typename = str(typeinfo)
if typename.startswith("typing.List"):
T = typecheck.sequence_type(typeinfo)
return [_process(T, x, make) for x in val]
elif typename.startswith("typing.Tuple"):
Ts = typecheck.tuple_types(typeinfo)
if len(Ts) != len(val):
raise ValueError("Invalid data. Expected {}, got {}.".format(Ts, val))
return tuple(
_process(T, x, make) for T, x in zip(Ts, val)
)
elif typename.startswith("typing.Dict"):
k_cls, v_cls = typecheck.mapping_types(typeinfo)
return {
_process(k_cls, k, make): _process(v_cls, v, make)
for k, v in val.items()
}
elif typename.startswith("typing.Any"):
# This requires a bit of explanation. We can't import our IO layer here,
# because it causes a circular import. Rather than restructuring the
# code for this, we use JSON serialization, which has similar primitive
# type restrictions as tnetstring, to check for conformance.
try:
json.dumps(val)
except TypeError:
raise ValueError(f"Data not serializable: {val}")
return val
else:
return typeinfo(val)
def make_object(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:
"""Create an object based on the state given in val."""
return _process(typeinfo, val, True)
def get_state(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:
"""Get the state of the object given as val."""
return _process(typeinfo, val, False)
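# Illustrative sketch (not part of mitmproxy): a minimal StateObject subclass.
# The class name and attributes below are invented for demonstration only.
class _ExamplePoint(StateObject):
    _stateobject_attributes = dict(x=int, y=int)

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    @classmethod
    def from_state(cls, state):
        # get_state() returns {"x": ..., "y": ...}; from_state() rebuilds it.
        obj = cls(0, 0)
        obj.set_state(state)
        return obj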
|
mit
| 1,843,769,143,436,569,600
| 33.515152
| 86
| 0.60755
| false
| 4.067857
| false
| false
| false
|
rechner/Taxidi
|
webcam.py
|
1
|
18402
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#TODO: Update for use with new API
#TODO: Read config, optionally disable rectangle.
#TODO: Create Destroy() method
#TODO: Implement file system organization, handle converting & uploading image to server.
#TODO: Use gstreamer for Linux instead of opencv - better performance(?)
#This is needed for PIL to import in OS X (FIXME)
import sys
sys.path.append('/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages')
import os
import wx
import wx.lib.imagebrowser as ib
import logging
import conf
from itertools import *
from operator import itemgetter
from PIL import Image
if conf.as_bool(conf.config['webcam']['enable']):
import opencv
from opencv import cv, highgui
class LivePanel(wx.Panel):
"""
Creates a wxPanel for capturing from an USB webcam with OpenCV, meaning
this works with all platforms OpenCV works with (Linux, OS X, Windows).
Initialize just like a wx.Panel, optionally specifying a camera index, starting
from 0. Default of -1 will automatically select the first useable device.
"""
def __init__(self, parent, id, camera=-1):
wx.Panel.__init__(self, parent, id, style=wx.NO_BORDER)
self.camera = camera
self.cap = highgui.cvCreateCameraCapture(camera)
wximg = wx.Image('resources/icons/camera-error-128.png')
self.errorBitmap = wximg.ConvertToBitmap()
self._error = 0
self.store = Storage()
self.Bind(wx.EVT_IDLE, self.onIdle)
def onIdle(self, event):
"""
Event to grab and display a frame from the camera. (internal use).
"""
if self.cap == None: #Should be cvCameraCapture instance.
#unbind the idle instance, change to click.
highgui.cvReleaseCapture(self.cap) #release the old instance and
self.cap = highgui.cvCreateCameraCapture(self.camera) #try new one.
self.displayError(self.errorBitmap, (128, 128))
raise CameraError('Unable to open camera, retrying....')
event.Skip()
try:
img = highgui.cvQueryFrame(self.cap)
except cv2.error as e:
raise CameraError('Error when querying for frame: {0}'.format(e))
self._error = 0 #worked successfully
img = opencv.cvGetMat(img)
cv.cvCvtColor(img, img, cv.CV_BGR2RGB)
if conf.as_bool(conf.config['webcam']['cropBars']):
#Draw cropping region
cv.cvRectangle(img, (80, -1), (560, 480), (205.0, 0.0, 0.0, 0.0), 2)
self.displayImage(img)
event.RequestMore()
def open(self, camera=-1):
"""
Open a capture device after __init__ has been called. Call close() first
before opening a new device. Takes camera index as an option.
"""
self.cap = highgui.cvCreateCameraCapture(camera)
self.Bind(wx.EVT_IDLE, self.onIdle)
pass
def close(self):
"""
Close a capture device and stops writing frames to the screen.
"""
highgui.cvReleaseCapture(self.cap)
self.Unbind(wx.EVT_IDLE)
def suspend(self):
"""
Suspend writing frames to the screen. Should be called when widget is hidden
to prevent excessive CPU usage.
"""
self.Unbind(wx.EVT_IDLE)
def resume(self):
"""
Resume reading and outputting frames.
"""
self.Bind(wx.EVT_IDLE, self.onIdle)
def displayImage(self, img, offset=(0,0)):
"""
Internal function for writing a bitmap grabbed from OpenCV to the panel.
"""
bitmap = wx.BitmapFromBuffer(img.width, img.height, img.imageData)
dc = wx.ClientDC(self)
dc.DrawBitmap(bitmap, offset[0], offset[1], False)
def displayError(self, bitmap, offset=(0,0)):
"""
Shows an error message saying the video device was not found.
Accepts bitmap as wx.Bitmap and position. Optimized for 128x128.
"""
#FIXME: (Minor) a bit of flicker on the error message.
if self._error > 2: #Only redraw if needed.
self.Unbind(wx.EVT_IDLE)
self.Unbind(wx.EVT_LEFT_UP) #just in case
self.Bind(wx.EVT_LEFT_UP, self.onClick)
return 0
boldfont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
boldfont.SetWeight(wx.BOLD)
boldfont.SetPointSize(16)
dc = wx.ClientDC(self)
dc.Clear()
pencolour = wx.Colour(180, 0, 0, wx.ALPHA_OPAQUE)
brushcolour = wx.Colour(180, 0, 0, wx.ALPHA_OPAQUE)
dc.SetPen(wx.Pen(pencolour))
dc.SetBrush(wx.Brush(brushcolour))
rect = wx.Rect(0,0, 450, 200)
rect.SetPosition((100, 100))
dc.DrawRoundedRectangleRect(rect, 8)
message = 'Unable to open video device.\nIs there one connected?\n\n' \
'Click here to retry.'
dc.SetTextForeground('white')
dc.DrawText(message, 280, 170)
dc.SetFont(boldfont)
dc.DrawText('Error', 280, 140)
dc.DrawBitmap(bitmap, offset[0], offset[1], False)
self._error += 1
def onClick(self, event):
self._error = 1 #For some reason it'll dissapear otherwise.
self.displayError(self.errorBitmap, (128, 128))
self.Unbind(wx.EVT_LEFT_UP)
self.open(self.camera)
def save(self, record=-1):
"""
Captures, crops, and saves a webcam frame. Pass an explicit record number
otherwise writes to next in sequence. Returns zero-padded photo reference ID.
"""
img = highgui.cvQueryFrame(self.cap)
img = opencv.cvGetMat(img)
#No BGR => RGB conversion needed for PIL output.
pil = opencv.adaptors.Ipl2PIL(img) #convert to a PIL
#~ pil = pil.crop((80, 0, 560, 480))
#~ pil.show()
return self.store.savePIL(pil, record)
#~ try:
#~ pil.save(file)
#~ except KeyError:
#~ pil.save(file+'.jpg')
class Storage:
"""
Crops, resizes, stores, and retrieves images for the database.
"""
def __init__(self):
self.log = logging.getLogger(__name__)
#~ self.log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] %(module)-6s [%(levelname)-8s] %(message)s')
ch.setFormatter(formatter)
self.log.addHandler(ch)
self.log.debug("Created webcam storage instance.")
store = conf.config['webcam']['store'].lower()
if store == 'local':
self.log.debug("Configured for local storage.")
self.store = 'local'
olddir = os.getcwd()
os.chdir(os.path.join(conf.homepath, '.taxidi'))
self.target = os.path.abspath(conf.config['webcam']['target'])
self.thumbs = os.path.abspath(conf.config['webcam']['thumbs'])
os.chdir(olddir) #Switch back to old cwd
#See if target directories exist, and create if needed.
for target in [self.target, self.thumbs]:
if not os.path.exists(target):
#Doesn't exist, try to create:
self.log.warn("Directory {0} doesn't exist. Attempting to create...".format(target))
try:
os.makedirs(target)
                    except OSError as e:
self.log.error(e)
self.log.error("Directory already exists or permission denied when creating directory.")
raise
self.log.debug("Target: {0}".format(self.target))
self.log.debug("Thumbs: {0}".format(self.thumbs))
elif store == 'remote':
self.store = 'remote' #TODO remote storage (low priority)
def savePIL(self, image, record=-1):
"""
Saves an image in PIL format, cropping & resizing if needed, and creating
a thumbnail.
`image`: A valid PIL instance.
`record`: Explicit id (integer) to save the image to (as a reference).
All other values are determined from [webcam] section in config.
If record is -1, the id will be automatically determined by the first
available slot. Returns zero-padded ID as string.
"""
if ((image.size[0] != 640) and (image.size[1] != 480)) or \
((image.size[0] != 480) and (image.size[1] != 480)):
#Scale up/down:
print "Scale"
image.thumbnail((480, 480))
if image.size != (480, 480):
#Crop it down.
print "Crop"
image = image.crop((80, 0, 560, 480))
if record >= 0: #Explicit file
record = str(record).zfill(6)
else: #Determine automatically
record = str(self._getNextSlot()).zfill(6)
filename = os.path.join(self.target, record + '.jpg')
self.log.debug("Saving image as {0}...".format(filename))
try:
image.save(filename)
except:
self.log.error("Unable to save image!")
raise
#Create & save thumbnails:
image.thumbnail((128, 128))
filename = os.path.join(self.thumbs, record + '.jpg')
try:
image.save(filename)
except:
self.log.error("Unable to save image!")
raise
return record
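    # Illustrative usage (record numbers and paths invented):
    #   store = Storage()
    #   ref = store.savePIL(pil_image)   # e.g. returns '000042'
    #   store.getImagePath(ref)          # <target>/000042.jpg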
def getThumbnail100(self, record):
"""
Returns a 100x100 wxBitmap given a record number.
"""
pil = Image.open(self.getThumbnailPath(record))
pil.thumbnail((100, 100))
image = wx.EmptyImage(*pil.size)
image.SetData(pil.convert("RGB").tostring())
return wx.BitmapFromImage(image)
def saveImage(self, filename, record=-1):
"""
Like savePIL(), but accepts local filename as argument instead.
Used for inserting a custom image into the photo database.
"""
try:
image = Image.open(filename)
except IOError as e:
self.log.error(e)
self.log.error('Unable to copy image.')
raise
#From a webcam most likely:
if image.size == (640, 480):
image = image.crop((80, 0, 560, 480))
#Scale to fit
image.thumbnail((480, 480), Image.ANTIALIAS)
if record >= 0: #Explicit file
record = str(record).zfill(6)
else: #Determine automatically
record = str(self._getNextSlot()).zfill(6)
filename = os.path.join(self.target, record + '.jpg')
self.log.debug("Saving image as {0}...".format(filename))
try:
image.save(filename)
except:
self.log.error("Unable to save image!")
raise
#Create & save thumbnails:
image.thumbnail((128, 128), Image.ANTIALIAS) #User higher quality for custom images
filename = os.path.join(self.thumbs, record + '.jpg')
try:
image.save(filename)
except:
self.log.error("Unable to save image!")
raise
return record
def delete(self, record):
try:
os.unlink(self.getImagePath(record))
os.unlink(self.getThumbnailPath(record))
except OSError as e:
self.log.error(e)
self.log.error("Unable to unlink files for photo record {0}".format(record))
def _getNextSlotAdvanced(self): #FIXME
files = []
ret = []
for dirname, dirnames, filenames in os.walk(self.target):
for name in filenames:
files.append(int(name.strip('.jpg')))
files.sort()
for k, g in groupby(enumerate(files), lambda (i, x): i - x):
ret.append(map(itemgetter(1), g))
return int(ret[1][-1]) + 1
def _getNextSlot(self):
files = []
for filename in os.listdir(self.target):
if filename.endswith('.jpg'):
files.append(int(filename.strip('.jpg')))
files.sort()
if len(files) == 0:
return 0
return int(files[-1]) + 1
def getImagePath(self, record):
"""
Returns the full file path for a photo record (local).
"""
try:
return os.path.join(self.target, str(int(record)).zfill(6) + '.jpg')
except ValueError:
return None
def getThumbnailPath(self, record):
"""
Returns full file path for a photo record thumbnail (local).
"""
return os.path.join(self.thumbs, str(int(record)).zfill(6) + '.jpg')
t_CONTROLS_SAVE = wx.NewEventType()
CONTROLS_SAVE = wx.PyEventBinder(t_CONTROLS_SAVE, 1)
t_CONTROLS_CANCEL = wx.NewEventType()
CONTROLS_CANCEL = wx.PyEventBinder(t_CONTROLS_CANCEL, 1)
t_CONTROLS_SELECT_FILE = wx.NewEventType()
CONTROLS_SELECT_FILE = wx.PyEventBinder(t_CONTROLS_SELECT_FILE, 1)
class Controls(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# Controls
self.button_play = wx.Button(self, label="Take Picture", size=(140, 50))
self.button_cancel = wx.Button(self, label="Cancel", size=(140, 50))
self.button_file = wx.Button(self, label="Pick File...", size=(290, 30))
# Sizers
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.button_play, 0, wx.ALL, border=5)
sizer.Add(self.button_cancel, 0, wx.ALL, border=5)
bsizer = wx.BoxSizer(wx.VERTICAL)
bsizer.Add(sizer)
bsizer.Add(self.button_file, 0, wx.ALL, border=5)
csizer = wx.BoxSizer(wx.HORIZONTAL)
csizer.AddStretchSpacer()
csizer.Add(bsizer)
csizer.AddStretchSpacer()
self.SetSizer(csizer)
# Events
self.button_play.Bind(wx.EVT_BUTTON, self.Snapshot)
self.button_cancel.Bind(wx.EVT_BUTTON, self.OnCancel)
self.button_file.Bind(wx.EVT_BUTTON, self.OnFile)
def Snapshot(self, evt):
evt2 = wx.PyCommandEvent(t_CONTROLS_SAVE, self.GetId())
self.GetEventHandler().ProcessEvent(evt2)
evt.Skip()
def OnCancel(self, evt):
evt2 = wx.PyCommandEvent(t_CONTROLS_CANCEL, self.GetId())
self.GetEventHandler().ProcessEvent(evt2)
evt.Skip()
def OnFile(self, evt):
evt2 = wx.PyCommandEvent(t_CONTROLS_SELECT_FILE, self.GetId())
self.GetEventHandler().ProcessEvent(evt2)
evt.Skip()
class Panel(wx.Panel):
def __init__(self, parent):
"""
This is the master webcam capture panel.
"""
self.log = logging.getLogger(__name__)
wx.Panel.__init__(self, parent, style=wx.NO_BORDER)
self.log.debug('Created webcam capture panel.')
# Controls
device = int(conf.config['webcam']['device'])
self.log.debug('Using OpenCV device {0}'.format(device))
self.live = LivePanel(self, device)
self.controls = Controls(self)
# Sizer
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.live, 1, wx.RIGHT|wx.EXPAND, 4)
sizer.Add(self.controls, 0, wx.ALL|wx.EXPAND, 4)
self.SetSizer(sizer)
# Events
self.controls.Bind(CONTROLS_SAVE, self.OnSave)
self.controls.Bind(CONTROLS_CANCEL, self.OnStop)
self.controls.Bind(CONTROLS_SELECT_FILE, self.OnFile)
self.controls.SetBackgroundColour('#005889') #TODO: source colours from theme.
self.live.SetBackgroundColour('#005889')
self.SetBackgroundColour('#005889')
#Variables:
self.overwrite = None
#Storage instance:
self.PhotoStorage = Storage()
def OnSave(self, evt):
"""
Internal event for saving an image from the webcam.
Read the reference ID with GetFile().
"""
        if self.overwrite is not None:
self.fileSelection = self.live.save(self.overwrite)
else:
self.fileSelection = self.live.save()
self.overwrite = None
evt.Skip()
def SetOverwrite(self, record):
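        # The next OnSave() will save over this record instead of allocating a
        # new slot; OnSave() clears the flag again afterwards.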
self.overwrite = record
def OnStop(self, evt):
"""
Hides the panel and suspends video input.
"""
self.log.debug('Hide & suspend webcam panel.')
self.Hide()
self.live.suspend()
evt.Skip()
def OnFile(self, evt):
"""
Internal event for the CONTROLS_SELECT_FILE event.
Read the selection with GetFile().
"""
self.live.suspend()
initial_dir = os.getcwd()
dlg = ib.ImageDialog(self, initial_dir)
dlg.Centre()
#TODO: Process file selection
if dlg.ShowModal() == wx.ID_OK:
# show the selected file
self.fileSelection = dlg.GetFile()
evt.Skip()
else:
self.fileSelection = None
dlg.Destroy()
self.live.resume()
def GetFile(self):
"""
Retrieve the file selected by the user after the
CONTROLS_SELECT_FILE event.
"""
return self.fileSelection
class CameraError(Exception):
def __init__(self, value=''):
if value == '':
self.error = 'Generic camera error.'
else:
self.error = value
def __str__(self):
return repr(self.error)
def getVideoDevices():
"""
Returns a list of available system video devices by name.
Pass index of this list to video capture class to use that device
(Linux only) or pass -1 to use the first available video device.
Note that this may have issues on some implementations of OpenCV.
"""
try:
import subprocess
devices = subprocess.check_output(
'for I in /sys/class/video4linux/*; do cat $I/name; done',
shell=True)
except AttributeError:
#Python < 2.7, use os.popen instead.
fdev = os.popen('for I in /sys/class/video4linux/*; do cat $I/name; done')
devices = fdev.read()
fdev.close()
    #Cast to list and drop the empty entry left by the trailing newline
    devices = devices.split('\n')[:-1]
return devices
if __name__ == '__main__':
import opencv
from opencv import cv, highgui
app = wx.PySimpleApp()
pFrame = wx.Frame(None, -1, "Webcam Viewer", size = (640, 560))
Panel(pFrame)
pFrame.Show()
app.MainLoop()
|
gpl-3.0
| 3,329,031,220,949,620,700
| 32.641682
| 112
| 0.586349
| false
| 3.785641
| true
| false
| false
|
ODM2/ODMToolsPython
|
odmtools/gui/pnlPlot.py
|
1
|
7003
|
#Boa:FramePanel:Panel1
import wx
from wx.lib.pubsub import pub as Publisher
try:
from agw import flatnotebook as fnb
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.flatnotebook as fnb
import matplotlib
matplotlib.use('WXAgg')
import plotTimeSeries
import plotSummary
import plotHistogram
import plotBoxWhisker
import plotProbability
from odmtools.controller.logicPlotOptions import SeriesPlotInfo
import logging
# from odmtools.common.logger import LoggerTool
#
# tool = LoggerTool()
# logger = tool.setupLogger(__name__, __name__ + '.log', 'w', logging.DEBUG)
logger = logging.getLogger('main')
[wxID_PANEL1, wxID_PAGEBOX, wxID_PAGEHIST, wxID_PAGEPROB,
wxID_PAGESUMMARY, wxID_PAGETIMESERIES, wxID_TABPLOTS
] = [wx.NewId() for _init_ctrls in range(7)]
class pnlPlot(fnb.FlatNotebook):
def __init__(self, parent, taskserver):
self.taskserver = taskserver
self._init_ctrls(parent)
self.initPubSub()
self.parent = parent
def _init_ctrls(self, parent):
fnb.FlatNotebook.__init__(self, id=wxID_TABPLOTS, name=u'tabPlots',
parent=parent, pos=wx.Point(0, 0), size=wx.Size(491, 288),
agwStyle=fnb.FNB_NODRAG | fnb.FNB_HIDE_TABS)
# style |= fnb.FNB_HIDE_TABS
# self.book.SetAGWWindowStyleFlag(style)
self.pltTS = plotTimeSeries.plotTimeSeries(id=wxID_PAGETIMESERIES, name='pltTS',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltTS, 'TimeSeries')
self.pltProb = plotProbability.plotProb(id=wxID_PAGEPROB, name='pltProb',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
        self.AddPage(self.pltProb, 'Probability')
self.pltHist = plotHistogram.plotHist(id=wxID_PAGEHIST, name='pltHist',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltHist, 'Histogram')
self.pltBox = plotBoxWhisker.PlotBox(id=wxID_PAGEBOX, name='pltBox',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltBox, 'Box/Whisker')
self.pltSum = plotSummary.plotSummary(id=wxID_PAGESUMMARY, name=u'pltSum',
parent=self, pos=wx.Point(784, 256), size=wx.Size(437, 477),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltSum, 'Summary')
self._seriesPlotInfo = None
self.editID = None
self.legendVisible = False
def initPubSub(self):
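        # Subscribe the plot handlers to messages published elsewhere in the
        # UI: date range changes, plot type, legend visibility, histogram bins,
        # selection changes and plot removal/clearing.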
Publisher.subscribe(self.onDateChanged, "onDateChanged")
Publisher.subscribe(self.onDateFull, "onDateFull")
Publisher.subscribe(self.onPlotType, "onPlotType")
Publisher.subscribe(self.onShowLegend, "onShowLegend")
Publisher.subscribe(self.onNumBins, "onNumBins")
Publisher.subscribe(self.onRemovePlot, "removePlot")
Publisher.subscribe(self.onRemovePlots, "removeMultPlot")
Publisher.subscribe(self.onChangeSelection, "changePlotSelection")
Publisher.subscribe(self.onUpdateValues, "updateValues")
Publisher.subscribe(self.clear, "clearPlot")
def onUpdateValues(self, event):
self.pltTS.updateValues()
def onChangeSelection(self, datetime_list):
self.pltTS.changePlotSelection( datetime_list)
def onNumBins(self, numBins):
self.pltHist.changeNumOfBins(numBins)
def onDateChanged(self, startDate, endDate):
self._seriesPlotInfo.updateDateRange(startDate, endDate)
self.redrawPlots()
def onDateFull(self):
self._seriesPlotInfo.updateDateRange()
self.redrawPlots()
# Reset the date to the full date
def onPlotType(self, event, ptype):
self.pltTS.onPlotType(ptype)
self.pltProb.onPlotType(ptype)
def onShowLegend(self, event, isVisible):
try:
self.pltTS.onShowLegend(isVisible)
self.pltProb.onShowLegend(isVisible)
self.legendVisible = isVisible
except AttributeError:
pass
def stopEdit(self):
self._seriesPlotInfo.stopEditSeries()
self.editID = None
self.pltTS.stopEdit()
self.redrawPlots()
def addEditPlot(self, memDB, seriesID, record_service):
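        # Enter edit mode for one series: lazily build the plot info, remember
        # the edited series, tell the time-series plot about it and redraw.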
self.record_service = record_service
if not self._seriesPlotInfo:
self._seriesPlotInfo = SeriesPlotInfo(memDB, self.taskserver)
self.editID = seriesID
self._seriesPlotInfo.setEditSeries(self.editID)
self.pltTS.setEdit(self.editID)
self.redrawPlots()
def addPlot(self, memDB, seriesID):
"""
Creates the plot
"""
logger.debug("Adding plot")
Publisher.sendMessage("EnablePlotButton", plot=self.getActivePlotID(), isActive=True)
if not self._seriesPlotInfo:
self._seriesPlotInfo = SeriesPlotInfo(memDB, self.taskserver)
self._seriesPlotInfo.update(seriesID, True)
logger.debug("Redrawing plots")
self.redrawPlots()
def onRemovePlot(self, seriesID):
self._seriesPlotInfo.update(seriesID, False)
self.redrawPlots()
def onRemovePlots(self, seriesIDs):
for series in seriesIDs:
self._seriesPlotInfo.update(series.id, False)
self.redrawPlots()
def redrawPlots(self):
logger.debug("Plot Summary")
self.pltSum.Plot(self._seriesPlotInfo)
logger.debug("Plot Probability")
self.pltProb.Plot(self._seriesPlotInfo)
logger.debug("Plot Boxwhisker")
self.pltBox.Plot(self._seriesPlotInfo)
logger.debug("Plot Timeseries")
self.pltTS.Plot(self._seriesPlotInfo)
logger.debug("Plot Histogram")
self.pltHist.Plot(self._seriesPlotInfo)
self.onShowLegend(event=None, isVisible=self.legendVisible)
maxStart, maxEnd, currStart, currEnd = self._seriesPlotInfo.getDates()
Publisher.sendMessage("resetdate", startDate=maxStart, endDate=maxEnd, currStart=currStart, currEnd=currEnd)
def selectPlot(self, value):
self.SetSelection(value)
def getActivePlotID(self):
return self.GetSelection()
def close(self):
self.pltTS.close()
def clear(self):
"""
:return:
"""
if self._seriesPlotInfo:
for seriesID in self._seriesPlotInfo.getSeriesIDs():
self._seriesPlotInfo.update(seriesID, False)
self.redrawPlots()
|
bsd-3-clause
| -7,721,344,204,155,205,000
| 33.328431
| 116
| 0.622305
| false
| 3.653104
| false
| false
| false
|
hyperkitty/kittystore
|
kittystore/__init__.py
|
1
|
3228
|
# -*- coding: utf-8 -*-
"""
Module entry point: call get_store() to instantiate a KittyStore
implementation.
Copyright (C) 2012 Aurelien Bompard
Author: Aurelien Bompard <abompard@fedoraproject.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
See http://www.gnu.org/copyleft/gpl.html for the full text of the
license.
"""
from __future__ import absolute_import, print_function, unicode_literals
__all__ = ("get_store", "create_store", "MessageNotFound",
"SchemaUpgradeNeeded")
from kittystore.search import SearchEngine
from kittystore.caching import register_events
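# Typical usage (sketch, assuming a settings object exposing the keys checked
# in _check_settings below):
#     store = get_store(settings)              # open an existing store
#     store, version = create_store(settings)  # create the schema and index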
def _check_settings(settings):
required_keys = ("KITTYSTORE_URL", "KITTYSTORE_SEARCH_INDEX",
"MAILMAN_REST_SERVER", "MAILMAN_API_USER",
"MAILMAN_API_PASS")
for req_key in required_keys:
try:
getattr(settings, req_key)
except AttributeError:
raise AttributeError("The settings file is missing the \"%s\" key" % req_key)
if settings.KITTYSTORE_URL.startswith("mongo://"):
raise NotImplementedError
def _get_search_index(settings):
search_index_path = settings.KITTYSTORE_SEARCH_INDEX
if search_index_path is None:
return None
return SearchEngine(search_index_path)
def get_store(settings, debug=None, auto_create=False):
"""Factory for a KittyStore subclass"""
_check_settings(settings)
if debug is None:
debug = getattr(settings, "KITTYSTORE_DEBUG", False)
search_index = _get_search_index(settings)
if getattr(settings, "KITTYSTORE_USE_STORM", False):
from kittystore.storm import get_storm_store
store = get_storm_store(settings, search_index, debug, auto_create)
else:
from kittystore.sa import get_sa_store
store = get_sa_store(settings, search_index, debug, auto_create)
if search_index is not None and search_index.needs_upgrade():
if auto_create:
search_index.upgrade(store)
else:
store.close()
raise SchemaUpgradeNeeded()
register_events()
return store
def create_store(settings, debug=None):
"""Factory for a KittyStore subclass"""
_check_settings(settings)
if debug is None:
debug = getattr(settings, "KITTYSTORE_DEBUG", False)
search_index = _get_search_index(settings)
if getattr(settings, "KITTYSTORE_USE_STORM", False):
from kittystore.storm import get_storm_store, create_storm_db
version = create_storm_db(settings, debug)
store = get_storm_store(settings, search_index, debug)
else:
from kittystore.sa import create_sa_db, get_sa_store
version = create_sa_db(settings, debug)
store = get_sa_store(settings, search_index, debug)
if search_index is not None and search_index.needs_upgrade():
search_index.upgrade(store)
return store, version
class SchemaUpgradeNeeded(Exception):
"""Raised when there are pending patches"""
class MessageNotFound(Exception):
pass
|
gpl-3.0
| 1,805,717,560,906,570,800
| 32.278351
| 89
| 0.682156
| false
| 3.714614
| false
| false
| false
|
fgaudin/aemanager
|
accounts/models.py
|
1
|
20254
|
# coding=utf-8
from decimal import Decimal
from django.utils.formats import localize
import datetime
from reportlab.platypus import Paragraph, Spacer
from reportlab.lib.units import inch
from reportlab.platypus import Table, TableStyle
from django.db import models, connection
from core.models import OwnedObject
from django.utils.translation import ugettext_lazy as _, ugettext
from contact.models import Contact
from django.core.urlresolvers import reverse
from project.models import Row, Proposal, update_row_amount, \
ROW_CATEGORY_SERVICE, ROW_CATEGORY, PROPOSAL_STATE_ACCEPTED, ProposalRow, \
VAT_RATES_2_1, VAT_RATES_5_5, VAT_RATES_19_6
from django.db.models.aggregates import Sum, Min, Max
from django.db.models.signals import post_save, pre_save, post_delete
from django.core.validators import MaxValueValidator
from accounts.utils.pdf import InvoiceTemplate
PAYMENT_TYPE_CASH = 1
PAYMENT_TYPE_BANK_CARD = 2
PAYMENT_TYPE_TRANSFER = 3
PAYMENT_TYPE_CHECK = 4
PAYMENT_TYPE_PAYPAL = 5
PAYMENT_TYPE_DEBIT = 6
PAYMENT_TYPE = ((PAYMENT_TYPE_CASH, _('Cash')),
(PAYMENT_TYPE_BANK_CARD, _('Bank card')),
(PAYMENT_TYPE_TRANSFER, _('Transfer')),
(PAYMENT_TYPE_CHECK, _('Check')),
(PAYMENT_TYPE_PAYPAL, _('Paypal')),
(PAYMENT_TYPE_DEBIT, _('Debit')))
class Expense(OwnedObject):
date = models.DateField(verbose_name=_('Date'), help_text=_('format: mm/dd/yyyy'), db_index=True)
reference = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('Reference'))
supplier = models.CharField(max_length=70, blank=True, null=True, verbose_name=_('Supplier'))
amount = models.DecimalField(max_digits=12, decimal_places=2, verbose_name=_('Amount'))
payment_type = models.IntegerField(choices=PAYMENT_TYPE, verbose_name=_('Payment type'))
description = models.CharField(max_length=100, blank=True, null=True, verbose_name=_('Description'))
class InvoiceAmountError(Exception):
pass
class InvoiceIdNotUniqueError(Exception):
pass
class InvalidInvoiceIdError(Exception):
pass
MAX_INVOICE_ID = 999999999
INVOICE_STATE_EDITED = 1
INVOICE_STATE_SENT = 2
INVOICE_STATE_PAID = 3
INVOICE_STATE = ((INVOICE_STATE_EDITED, _('Edited')),
(INVOICE_STATE_SENT, _('Sent')),
(INVOICE_STATE_PAID, _('Paid')))
class InvoiceManager(models.Manager):
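    # Per-owner aggregation helpers: paid and waiting sales, late invoices,
    # invoices still to send, amounts remaining to invoice and collected VAT.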
def get_next_invoice_id(self, owner):
return (Invoice.objects.filter(owner=owner).aggregate(invoice_id=Max('invoice_id'))['invoice_id'] or 0) + 1
def get_paid_sales(self, owner, reference_date=None):
if not reference_date:
reference_date = datetime.date.today()
amount_sum = self.filter(state=INVOICE_STATE_PAID,
owner=owner,
paid_date__lte=reference_date,
paid_date__year=reference_date.year).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_paid_service_sales(self, owner, year=None):
if not year:
year = datetime.date.today().year
amount_sum = InvoiceRow.objects.filter(invoice__state=INVOICE_STATE_PAID,
owner=owner,
category=ROW_CATEGORY_SERVICE,
invoice__paid_date__year=year).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_waiting_payments(self, owner):
amount_sum = self.filter(state=INVOICE_STATE_SENT,
owner=owner).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_waiting_service_payments(self, owner):
amount_sum = InvoiceRow.objects.filter(invoice__state=INVOICE_STATE_SENT,
owner=owner,
category=ROW_CATEGORY_SERVICE).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_late_invoices(self, owner):
late_invoices = self.filter(state=INVOICE_STATE_SENT,
payment_date__lt=datetime.date.today(),
owner=owner)
return late_invoices
def get_late_invoices_for_notification(self):
late_invoices = self.filter(state=INVOICE_STATE_SENT,
payment_date__lt=datetime.date.today(),
owner__notification__notify_late_invoices=True)
return late_invoices
def get_invoices_to_send(self, owner):
invoices_to_send = self.filter(state=INVOICE_STATE_EDITED,
edition_date__lte=datetime.date.today(),
owner=owner)
return invoices_to_send
def get_invoices_to_send_for_notification(self):
invoices_to_send = self.filter(state=INVOICE_STATE_EDITED,
edition_date__lte=datetime.date.today(),
owner__notification__notify_invoices_to_send=True)
return invoices_to_send
def get_paid_sales_for_period(self, owner, begin_date, end_date):
if not begin_date or not end_date:
return 0
amount_sum = self.filter(state=INVOICE_STATE_PAID,
owner=owner,
paid_date__gte=begin_date,
paid_date__lte=end_date).aggregate(sales=Sum('amount'))
return amount_sum['sales'] or 0
def get_waiting_sales_for_period(self, owner, end_date, begin_date=None):
if not end_date:
return 0
amount_sum = self.filter(state__lte=INVOICE_STATE_SENT,
owner=owner,
payment_date__lte=end_date)
if begin_date:
amount_sum = amount_sum.filter(payment_date__gte=begin_date)
amount_sum = amount_sum.aggregate(waiting=Sum('amount'))
return amount_sum['waiting'] or 0
def get_first_invoice_paid_date(self, owner):
return self.filter(owner=owner).aggregate(min_date=Min('paid_date'))['min_date']
def get_paid_invoices(self, owner, begin_date=None):
if not begin_date:
return self.filter(state=INVOICE_STATE_PAID,
owner=owner,
paid_date__year=datetime.date.today().year).order_by('paid_date')
else:
return self.filter(state=INVOICE_STATE_PAID,
owner=owner,
paid_date__lte=datetime.date.today(),
paid_date__gte=begin_date).order_by('paid_date')
def get_waiting_invoices(self, owner):
return self.filter(state__lte=INVOICE_STATE_SENT,
owner=owner).order_by('payment_date')
def get_to_be_invoiced(self, owner):
accepted_proposal_amount_sum = Proposal.objects.filter(state=PROPOSAL_STATE_ACCEPTED,
owner=owner).extra(where=['project_proposal.ownedobject_ptr_id NOT IN (SELECT proposal_id FROM accounts_invoicerow irow JOIN accounts_invoice i ON irow.invoice_id = i.ownedobject_ptr_id WHERE i.state IN (%s,%s) AND irow.balance_payments = %s)'],
params=[INVOICE_STATE_SENT, INVOICE_STATE_PAID, True]).aggregate(amount=Sum('amount'))
        # Exclude amounts found in sent or paid invoices referencing an accepted proposal, i.e. subtract what has already been invoiced against proposals that are not yet balanced
invoicerows_to_exclude = InvoiceRow.objects.extra(where=['accounts_invoicerow.proposal_id NOT IN (SELECT proposal_id FROM accounts_invoicerow irow JOIN accounts_invoice i ON irow.invoice_id = i.ownedobject_ptr_id WHERE i.state IN (%s,%s) AND irow.balance_payments = %s)'],
params=[INVOICE_STATE_SENT, INVOICE_STATE_PAID, True]).exclude(invoice__state=INVOICE_STATE_EDITED).filter(owner=owner).aggregate(amount=Sum('amount'))
        # Add invoice rows of edited invoices that have no linked proposal
invoicerows_whithout_proposals = InvoiceRow.objects.filter(owner=owner,
proposal=None,
invoice__state=INVOICE_STATE_EDITED).aggregate(amount=Sum('amount'))
return (accepted_proposal_amount_sum['amount'] or 0) - (invoicerows_to_exclude['amount'] or 0) + (invoicerows_whithout_proposals['amount'] or 0)
def get_service_to_be_invoiced(self, owner):
accepted_proposal_amount_sum = ProposalRow.objects.filter(proposal__state=PROPOSAL_STATE_ACCEPTED,
category=ROW_CATEGORY_SERVICE,
owner=owner).extra(where=['project_proposal.ownedobject_ptr_id NOT IN (SELECT proposal_id FROM accounts_invoicerow irow JOIN accounts_invoice i ON irow.invoice_id = i.ownedobject_ptr_id WHERE i.state IN (%s,%s) AND irow.balance_payments = %s)'],
params=[INVOICE_STATE_SENT, INVOICE_STATE_PAID, True]).aggregate(amount=Sum('amount'))
invoicerows_to_exclude = InvoiceRow.objects.filter(proposal__state=PROPOSAL_STATE_ACCEPTED,
category=ROW_CATEGORY_SERVICE,
owner=owner).extra(where=['accounts_invoicerow.proposal_id NOT IN (SELECT proposal_id FROM accounts_invoicerow irow JOIN accounts_invoice i ON irow.invoice_id = i.ownedobject_ptr_id WHERE i.state IN (%s,%s) AND irow.balance_payments = %s)'],
params=[INVOICE_STATE_SENT, INVOICE_STATE_PAID, True]).exclude(invoice__state=INVOICE_STATE_EDITED).filter(owner=owner).aggregate(amount=Sum('amount'))
return (accepted_proposal_amount_sum['amount'] or 0) - (invoicerows_to_exclude['amount'] or 0)
def get_vat_for_period(self, owner, begin_date, end_date):
if not begin_date or not end_date:
return 0
amount_sum_2_1 = InvoiceRow.objects.filter(vat_rate=VAT_RATES_2_1,
invoice__state=INVOICE_STATE_PAID,
invoice__owner=owner,
invoice__paid_date__gte=begin_date,
invoice__paid_date__lte=end_date).aggregate(vat=Sum('amount'))
amount_sum_5_5 = InvoiceRow.objects.filter(vat_rate=VAT_RATES_5_5,
invoice__state=INVOICE_STATE_PAID,
invoice__owner=owner,
invoice__paid_date__gte=begin_date,
invoice__paid_date__lte=end_date).aggregate(vat=Sum('amount'))
amount_sum_19_6 = InvoiceRow.objects.filter(vat_rate=VAT_RATES_19_6,
invoice__state=INVOICE_STATE_PAID,
invoice__owner=owner,
invoice__paid_date__gte=begin_date,
invoice__paid_date__lte=end_date).aggregate(vat=Sum('amount'))
return (amount_sum_2_1['vat'] or 0) * VAT_RATES_2_1 / 100\
+ (amount_sum_5_5['vat'] or 0) * VAT_RATES_5_5 / 100\
+ (amount_sum_19_6['vat'] or 0) * VAT_RATES_19_6 / 100
class Invoice(OwnedObject):
customer = models.ForeignKey(Contact, blank=True, null=True, verbose_name=_('Customer'))
invoice_id = models.IntegerField(verbose_name=_("Invoice id"))
state = models.IntegerField(choices=INVOICE_STATE, default=INVOICE_STATE_EDITED, verbose_name=_("State"), db_index=True)
amount = models.DecimalField(blank=True, max_digits=12, decimal_places=2, default=0, verbose_name=_("Amount"))
edition_date = models.DateField(verbose_name=_("Edition date"), help_text=_('format: mm/dd/yyyy'), db_index=True)
payment_date = models.DateField(verbose_name=_("Payment date"), help_text=_('format: mm/dd/yyyy'), db_index=True)
payment_type = models.IntegerField(choices=PAYMENT_TYPE, blank=True, null=True, verbose_name=_('Payment type'))
paid_date = models.DateField(blank=True, null=True, verbose_name=_("Paid date"), help_text=_('format: mm/dd/yyyy'), db_index=True)
execution_begin_date = models.DateField(blank=True, null=True, verbose_name=_("Execution begin date"), help_text=_('format: mm/dd/yyyy'))
execution_end_date = models.DateField(blank=True, null=True, verbose_name=_("Execution end date"), help_text=_('format: mm/dd/yyyy'))
penalty_date = models.DateField(blank=True, null=True, verbose_name=_("Penalty date"), help_text=_('format: mm/dd/yyyy'))
penalty_rate = models.DecimalField(blank=True, null=True, max_digits=4, decimal_places=2, verbose_name=_("Penalty rate"))
discount_conditions = models.CharField(max_length=100, blank=True, null=True, verbose_name=_("Discount conditions"))
footer_note = models.CharField(max_length=90, blank=True, null=True, verbose_name=_('Footer note'))
objects = InvoiceManager()
class Meta:
ordering = ['invoice_id']
def __unicode__(self):
return "<a href=\"%s\">%s</a>" % (reverse('invoice_detail', kwargs={'id' : self.id}), ugettext("invoice #%d") % (self.invoice_id))
def isInvoiceIdValid(self):
validator = MaxValueValidator(MAX_INVOICE_ID)
try:
validator(self.invoice_id)
except:
return False
return True
def isInvoiceIdUnique(self, owner):
invoices = Invoice.objects.filter(owner=owner,
invoice_id=self.invoice_id)
if self.id:
invoices = invoices.exclude(id=self.id)
if len(invoices):
return False
return True
def getNature(self):
natures = self.invoice_rows.values_list('category', flat=True).order_by('category').distinct()
result = []
natures_dict = dict(ROW_CATEGORY)
for nature in natures:
result.append(unicode(natures_dict[nature]))
return " & ".join(result)
def save(self, force_insert=False, force_update=False, using=None, user=None):
if not self.isInvoiceIdValid():
raise InvalidInvoiceIdError(ugettext('Invoice id must be less than or equal to %d') % (MAX_INVOICE_ID))
if not self.isInvoiceIdUnique(user):
raise InvoiceIdNotUniqueError(ugettext("Invoice id must be unique"))
super(Invoice, self).save(force_insert, force_update, using, user)
def check_amounts(self):
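        # For each proposal referenced by this invoice, the invoiced rows must
        # not exceed what remains to be invoiced on that proposal (excluding
        # this invoice itself).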
proposals = Proposal.objects.filter(invoice_rows__invoice=self).distinct()
for proposal in proposals:
remaining_amount = proposal.get_remaining_to_invoice(exclude_invoice=self)
rows_amount = InvoiceRow.objects.filter(invoice=self,
proposal=proposal).aggregate(amount=Sum('amount'))['amount'] or 0
if float(remaining_amount) < float(rows_amount):
raise InvoiceRowAmountError(ugettext("Amounts invoiced can't be greater than proposals remaining amounts"))
return True
def get_vat(self):
cursor = connection.cursor()
cursor.execute('SELECT SUM(accounts_invoicerow.amount * accounts_invoicerow.vat_rate / 100) AS "vat" FROM "accounts_invoicerow" WHERE "accounts_invoicerow"."invoice_id" = %s', [self.id])
row = cursor.fetchone()
vat = row[0] or Decimal(0)
vat = vat.quantize(Decimal(1)) if vat == vat.to_integral() else vat.normalize()
return vat
def amount_including_tax(self):
return self.amount + self.get_vat()
def to_pdf(self, user, response):
filename = ugettext('invoice_%(invoice_id)d.pdf') % {'invoice_id': self.invoice_id}
response['Content-Disposition'] = 'attachment; filename=%s' % (filename)
invoice_template = InvoiceTemplate(response, user)
invoice_template.init_doc(ugettext('Invoice #%(invoice_id)d') % {'invoice_id': self.invoice_id})
invoice_template.add_headers(self, self.customer, self.edition_date)
invoice_template.add_title(_("INVOICE #%d") % (self.invoice_id))
# proposal row list
rows = self.invoice_rows.all()
invoice_template.add_rows(rows)
# total amount on the right side of footer
right_block = invoice_template.get_total_amount(self.amount, rows)
invoice_amount = self.amount
invoice_amount = invoice_amount.quantize(Decimal(1)) if invoice_amount == invoice_amount.to_integral() else invoice_amount.normalize()
left_block = [Paragraph(_("Payment date : %s") % (localize(self.payment_date)), InvoiceTemplate.styleN),
Paragraph(_("Penalty begins on : %s") % (localize(self.penalty_date) or ''), InvoiceTemplate.styleN),
Paragraph(_("Penalty rate : %s") % (localize(self.penalty_rate) or ''), InvoiceTemplate.styleN),
Paragraph(_("Discount conditions : %s") % (self.discount_conditions or ''), InvoiceTemplate.styleN)]
if self.footer_note:
left_block.append(Spacer(invoice_template.doc.width, 0.1 * inch))
left_block.append(Paragraph(self.footer_note, InvoiceTemplate.styleNSmall))
else:
left_block.append(Spacer(invoice_template.doc.width, 0.2 * inch))
if self.owner.get_profile().iban_bban:
left_block.append(Paragraph(_("IBAN/BBAN : %s") % (self.owner.get_profile().iban_bban), InvoiceTemplate.styleNSmall))
if self.owner.get_profile().bic:
left_block.append(Paragraph(_("BIC/SWIFT : %s") % (self.owner.get_profile().bic), InvoiceTemplate.styleNSmall))
data = [[left_block,
'',
right_block], ]
if self.execution_begin_date and self.execution_end_date:
data[0][0].insert(1, Paragraph(_("Execution dates : %(begin_date)s to %(end_date)s") % {'begin_date': localize(self.execution_begin_date), 'end_date' : localize(self.execution_end_date)}, InvoiceTemplate.styleN))
footer_table = Table(data, [4.5 * inch, 0.3 * inch, 2.5 * inch], [1 * inch])
footer_table.setStyle(TableStyle([('VALIGN', (0, 0), (-1, -1), 'TOP'), ]))
invoice_template.append_to_story(footer_table)
invoice_template.build()
return response
class InvoiceRowAmountError(Exception):
pass
class InvoiceRow(Row):
invoice = models.ForeignKey(Invoice, related_name="invoice_rows")
proposal = models.ForeignKey(Proposal, related_name="invoice_rows", verbose_name=_('Proposal'), null=True, blank=True)
balance_payments = models.BooleanField(verbose_name=_('Balance payments for the proposal'), help_text=_('"Balancing payments for the proposal" means there will be no future invoices for the selected proposal. Thus the amount remaining to invoice for this proposal will fall to zero and its state will be set to "balanced" when all invoices are paid.'))
class Meta:
ordering = ['id']
def save(self, force_insert=False, force_update=False, using=None, user=None):
super(InvoiceRow, self).save(force_insert, force_update, using, user)
def update_invoice_amount(sender, instance, created=None, **kwargs):
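    # Signal handler keeping Invoice.amount equal to the sum of its rows; it is
    # connected to post_save and post_delete of InvoiceRow below.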
row = instance
invoice = row.invoice
invoice.amount = invoice.invoice_rows.all().aggregate(sum=Sum('amount'))['sum'] or 0
invoice.save(user=invoice.owner)
pre_save.connect(update_row_amount, sender=InvoiceRow)
post_save.connect(update_invoice_amount, sender=InvoiceRow)
post_delete.connect(update_invoice_amount, sender=InvoiceRow)
|
agpl-3.0
| 442,697,370,310,873,200
| 56.214689
| 356
| 0.604325
| false
| 3.904009
| false
| false
| false
|
asimshankar/tensorflow
|
tensorflow/python/kernel_tests/cond_v2_test.py
|
1
|
35442
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cond_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
class CondV2Test(test.TestCase):
def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
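    # Builds the same conditional with control_flow_ops.cond and with
    # cond_v2.cond_v2, then checks that forward values and gradients w.r.t.
    # train_vals agree for both pred=True and pred=False.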
if not feed_dict:
feed_dict = {}
with self.session(graph=ops.get_default_graph()) as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
expected = control_flow_ops.cond(pred, true_fn, false_fn, name="expected")
actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual")
expected_grad = gradients_impl.gradients(expected, train_vals)
actual_grad = gradients_impl.gradients(actual, train_vals)
sess_run_args = {pred: True}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
sess_run_args = {pred: False}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
@test_util.run_deprecated_v1
def testBasic(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * 2.0
def false_fn():
return y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testMultipleOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return x, y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testBasic2(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * y * 2.0
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testNoInputs(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
def true_fn():
return constant_op.constant(1.0)
def false_fn():
return constant_op.constant(2.0)
out = cond_v2.cond_v2(pred, true_fn, false_fn)
self.assertEqual(sess.run(out, {pred: True}), (1.0,))
self.assertEqual(sess.run(out, {pred: False}), (2.0,))
def _createCond(self, name):
"""Creates a cond_v2 call and returns the output tensor and the cond op."""
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
return x
def false_fn():
return x + 1
output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
cond_op = output.op.inputs[0].op
self.assertEqual(cond_op.type, "If")
return output, cond_op
def testDefaultName(self):
with ops.Graph().as_default():
_, cond_op = self._createCond(None)
self.assertEqual(cond_op.name, "cond")
self.assertRegexpMatches(
cond_op.get_attr("then_branch").name, r"cond_true_\d*")
self.assertRegexpMatches(
cond_op.get_attr("else_branch").name, r"cond_false_\d*")
with ops.Graph().as_default():
with ops.name_scope("foo"):
_, cond1_op = self._createCond("")
self.assertEqual(cond1_op.name, "foo/cond")
self.assertRegexpMatches(
cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*")
self.assertRegexpMatches(
cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*")
_, cond2_op = self._createCond(None)
self.assertEqual(cond2_op.name, "foo/cond_1")
self.assertRegexpMatches(
cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*")
self.assertRegexpMatches(
cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
@test_util.run_v1_only("b/120545219")
def testDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
return x * y * 2.0
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
return x * y * 2.0
return nested_fn()
return fn()
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testDoubleNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
@function.defun
def nested_nested_fn():
return x * y * 2.0
return nested_nested_fn()
return nested_fn()
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNestedCond(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
return x * y * 2.0
def false_false_fn():
return x * 5.0
return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn")
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testNestedCondBothBranches(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return _cond(pred, lambda: x + y, lambda: x * x, name=None)
def false_fn():
return _cond(pred, lambda: x - y, lambda: y * y, name=None)
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testDoubleNestedCond(self):
def run_test(pred1_value, pred2_value):
def build_graph():
pred1 = array_ops.placeholder(dtypes.bool, name="pred1")
pred2 = array_ops.placeholder(dtypes.bool, name="pred2")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
def false_true_true_fn():
return x * y * 2.0
def false_true_false_fn():
return x * 10.0
return _cond(
pred1,
false_true_true_fn,
false_true_false_fn,
name="inside_false_true_fn")
def false_false_fn():
return x * 5.0
return _cond(
pred2, false_true_fn, false_false_fn, name="inside_false_fn")
return x, y, pred1, pred2, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [y], {
pred1: pred1_value,
pred2: pred2_value
})
run_test(True, True)
run_test(True, False)
run_test(False, False)
run_test(False, True)
def testGradientFromInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testGradientFromInsideNestedDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
@function.defun
def inner_nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
return inner_nesting_fn()
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testBuildCondAndGradientInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
# Build cond and its gradient inside a Defun.
@function.defun
def fn():
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
return gradients_impl.gradients(cond_outer, [x, y])
grads = fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default(), self.session(
graph=ops.get_default_graph()) as sess:
grads, pred_outer, pred_inner = build_graph()
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
@test_util.run_deprecated_v1
def testSecondDerivative(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
cond_grad = gradients_impl.gradients(cond, [x])
cond_grad_grad = gradients_impl.gradients(cond_grad, [x])
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testGradientOfDeserializedCond(self):
with ops.Graph().as_default():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
ops.add_to_collection("x", x)
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
ops.add_to_collection("pred", pred)
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
ops.add_to_collection("cond", cond)
meta_graph = saver.export_meta_graph()
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
saver.import_meta_graph(meta_graph)
x = ops.get_collection("x")[0]
pred = ops.get_collection("pred")[0]
cond = ops.get_collection("cond")
cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad")
cond_grad_grad = gradients_impl.gradients(
cond_grad, [x], name="cond_grad_grad")
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testLowering(self):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cond_output, _ = self._createCond("cond")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(cond_output, options=run_options, run_metadata=run_metadata)
# If lowering was enabled, there should be a `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(switch_found,
"A `Switch` op should exist if the graph was lowered.")
# If lowering was enabled, there should be no `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(if_found,
"An `If` op was found, but it should be lowered.")
@test_util.run_deprecated_v1
def testLoweringDisabledInXLA(self):
with self.session(graph=ops.Graph()) as sess:
# Build the cond_v2 in an XLA context
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
cond_output, _ = self._createCond("cond")
xla_context.Exit()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(cond_output, options=run_options, run_metadata=run_metadata)
# Lowering disabled in XLA, there should be no `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(
switch_found,
"A `Switch` op exists, but the graph should not be lowered.")
# Lowering disabled in XLA, there should still be an `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(
if_found,
"An `If` op was not found, but the graph should not be lowered.")
@test_util.run_deprecated_v1
def testLoweringDisabledWithSingleThreadedExecutorContext(self):
with self.session(graph=ops.Graph()) as sess:
@function.defun
def _add_cond(x):
return cond_v2.cond_v2(
constant_op.constant(True, name="pred"),
lambda: x,
lambda: x + 1)
x = array_ops.placeholder(shape=None, dtype=dtypes.float32)
with context.function_executor_type("SINGLE_THREADED_EXECUTOR"):
out_cond = _add_cond(x)
# The fact that sess.run() succeeds means lowering is disabled, because
# the single threaded executor does not support cond v1 ops.
sess.run(out_cond, feed_dict={x: 1.0})
@test_util.enable_control_flow_v2
def testStructuredOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return ((x * y,), y)
def false_fn():
return ((x,), y * 3.0)
output = control_flow_ops.cond(
constant_op.constant(False), true_fn, false_fn)
self.assertEqual(self.evaluate(output[0][0]), 1.)
self.assertEqual(self.evaluate(output[1]), 9.)
@test_util.enable_control_flow_v2
@test_util.run_deprecated_v1
def testRaisesOutputStructuresMismatch(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return ((x,), y * 3.0)
with self.assertRaisesRegexp(
ValueError, "Outputs of true_fn and false_fn must"
" have the same structure"):
control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
@test_util.enable_control_flow_v2
def testCondAndTensorArray(self):
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
output_t = output.stack()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.enable_control_flow_v2
def testCondAndTensorArrayInDefun(self):
@function.defun
def f():
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
return output.stack()
output_t = f()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.run_deprecated_v1
def testForwardPassRewrite(self):
x = constant_op.constant(1.0, name="x")
output = cond_v2.cond_v2(constant_op.constant(True),
lambda: x * 2.0,
lambda: x)
if_op = output.op.inputs[0].op
self.assertEqual(if_op.type, "If")
# pylint: disable=g-deprecated-assert
self.assertEqual(len(if_op.outputs), 1)
gradients_impl.gradients(output, x)
# if_op should have been rewritten to output 2.0 intermediate.
self.assertEqual(len(if_op.outputs), 2)
gradients_impl.gradients(output, x)
# Computing the gradient again shouldn't rewrite if_op again.
self.assertEqual(len(if_op.outputs), 2)
# pylint: enable=g-deprecated-assert
class CondV2CollectionTest(test.TestCase):
def testCollectionIntValueAccessInCond(self):
"""Read values from graph collections inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = 2
y = 5
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_const = constant_op.constant(ops.get_collection("x")[0])
y_const = constant_op.constant(ops.get_collection("y")[0])
return math_ops.add(x_const, y_const)
cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionTensorValueAccessInCond(self):
"""Read tensors from collections inside of cond_v2 & use them."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_read = ops.get_collection("x")[0]
y_read = ops.get_collection("y")[0]
return math_ops.add(x_read, y_read)
cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionIntValueWriteInCond(self):
"""Make sure Int writes to collections work inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
def true_fn():
z = math_ops.add(x, y)
ops.add_to_collection("z", 7)
return math_ops.mul(x, z)
def false_fn():
z = math_ops.add(x, y)
return math_ops.mul(x, z)
cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
self.assertEquals(cnd.eval(), 14)
read_z_collection = ops.get_collection("z")
self.assertEquals(read_z_collection, [7])
class CondV2ContainerTest(test.TestCase):
def testContainer(self):
"""Set containers outside & inside of cond_v2.
Make sure the containers are set correctly for both variable creation
(tested by variables.Variable) and for stateful ops (tested by FIFOQueue)
"""
self.skipTest("b/113048653")
with ops.Graph().as_default() as g:
with self.session(graph=g):
v0 = variables.Variable([0])
q0 = data_flow_ops.FIFOQueue(1, dtypes.float32)
def container(node):
return node.op.get_attr("container")
self.assertEqual(compat.as_bytes(""), container(v0))
self.assertEqual(compat.as_bytes(""), container(q0.queue_ref))
def true_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2t"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2t"), container(v2))
self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(2.0)
def false_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2f"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2f"), container(v2))
self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(6.0)
with ops.container("l1"):
cnd_true = cond_v2.cond_v2(
constant_op.constant(True), true_fn, false_fn)
self.assertEquals(cnd_true.eval(), 2)
cnd_false = cond_v2.cond_v2(
constant_op.constant(False), true_fn, false_fn)
self.assertEquals(cnd_false.eval(), 6)
v4 = variables.Variable([3])
q4 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v5 = variables.Variable([4])
q5 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v4))
self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref))
self.assertEqual(compat.as_bytes(""), container(v5))
self.assertEqual(compat.as_bytes(""), container(q5.queue_ref))
class CondV2ColocationGroupAndDeviceTest(test.TestCase):
def testColocateWithBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3)
def fn2():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
def testColocateWithInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn2():
with ops.colocate_with(b.op):
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant([2.0], name="d")
self.assertEqual([b"loc:@a"], d.op.colocation_groups())
def testColocateWithInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.device("/device:CPU:1"):
b = constant_op.constant([2.0], name="b")
def fn():
with ops.colocate_with(b.op):
c = math_ops.add(a, a, name="c")
return c
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
# We expect there to be two partitions because of the
# colocate_with. We are only running the cond, which has a data
# dependency on `a` but not on `b`. So, without the colocate_with
# we would expect execution on just one device.
self.assertTrue(len(run_metadata.partition_graphs) >= 2)
def testDeviceBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
def fn():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
with ops.device("/device:CPU:0"):
self.assertIn(
compat.as_bytes("CPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn, fn)))
def fn2():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
if test_util.is_gpu_available():
with ops.device("/device:GPU:0"):
self.assertIn(
compat.as_bytes("GPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn2, fn2)))
else:
self.skipTest("Test requrires a GPU to check GPU device placement.")
def testDeviceInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})):
def fn2():
with ops.device("/device:CPU:1"):
c = constant_op.constant(3.0)
self.assertEqual("/device:CPU:1", c.op.device)
return c
with ops.device("/device:CPU:0"):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant(4.0)
self.assertEqual("/device:CPU:0", d.op.device)
def testDeviceInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
def fn():
with ops.device("/device:CPU:1"):
c = math_ops.add(a, a, name="c")
return c
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
self.assertTrue(len(run_metadata.partition_graphs) >= 2)
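# Test helpers: dispatch to the v1 or v2 cond implementation depending on
# whether the default graph currently carries a v1 CondContext.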
def _cond(pred, true_fn, false_fn, name):
if _is_old_cond():
return control_flow_ops.cond(pred, true_fn, false_fn, name=name)
else:
return cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
def _is_old_cond():
return isinstance(ops.get_default_graph()._get_control_flow_context(),
control_flow_ops.CondContext)
if __name__ == "__main__":
test.main()
|
apache-2.0
| 3,045,569,778,192,330,000
| 31.249318
| 80
| 0.587834
| false
| 3.386071
| true
| false
| false
|
levilucio/SyVOLT
|
ECore_Copier_MM/transformation-Large/HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType.py
|
1
|
5089
|
from core.himesis import Himesis
class HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType, self).__init__(name='HeattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """eattributeOUTeGenericTypeSolveRefEAttributeEGenericTypeEAttributeEGenericType"""
self["GUID__"] = 7500290523339363630
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 3345355481358239434
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 2661112097501954342
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 2884002363878182308
self.vs[3]["associationType"] = """eGenericType"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 3237503427209923934
self.vs[4]["associationType"] = """eGenericType"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 3487447824061178971
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EAttribute"""
self.vs[5]["mm__"] = """EAttribute"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 180383522542507929
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 9029141784669719181
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EGenericType"""
self.vs[7]["mm__"] = """EGenericType"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 705924063494009604
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 5107917165971943200
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EAttribute"""
self.vs[9]["mm__"] = """EAttribute"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 8056650007601953622
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 8334363595364440411
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EGenericType"""
self.vs[11]["mm__"] = """EGenericType"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 638205883689070586
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 5247899703763388228
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 9152985125860709070
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 8939660675905724386
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 4724550716111922994
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 6342963225845912724
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 8425545405611867446
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 2510594769584959828
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 2091737926535973939
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 8569331947771768572
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 684785101581142767
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 9082076936603064885
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 4941466305534700405
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 2426201971054401358
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 4495076672642341780
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 7828040183483422309
|
mit
| 4,146,659,116,746,106,400
| 48.407767
| 298
| 0.523875
| false
| 2.998821
| false
| false
| false
|
mattmakesmaps/opencv-junk
|
opencv-utils/bin/delete_dupes.py
|
1
|
1635
|
import argparse
import os
import sys
def get_abs_path(in_path):
"""
Given a relative or absolute path, return the absolute path.
:param in_path:
:return:
"""
if os.path.isabs(in_path):
return in_path
else:
return os.path.abspath(in_path)
if __name__ == '__main__':
# Define command line interface
parser = argparse.ArgumentParser(description='Given a source and target directory, '
'if files are present in the source, delete from the target.')
parser.add_argument('source_path', help="Path containing source files.", action="store")
parser.add_argument('target_path', help="Path containing files to delete if they appear in source.", action="store")
parser.add_argument('--dry-run', help="Don't delete, just output files marked for delete.",
action="store_true", default=False)
args = parser.parse_args()
full_source_path = get_abs_path(args.source_path)
full_target_path = get_abs_path(args.target_path)
source_files = os.listdir(full_source_path)
target_files = os.listdir(full_target_path)
if args.dry_run:
sys.stdout.write("DRY RUN: NO FILES WILL BE DELETED\n")
else:
sys.stdout.write("WARNING: THE FOLLOWING FILES WILL BE DELETED\n")
for source_file in source_files:
if source_file in target_files:
target_file = os.path.join(full_target_path, source_file)
sys.stdout.write("%s\n" % target_file)
# Real Run, Delete Files
if not args.dry_run:
os.remove(target_file)
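# Illustrative invocation (editorial example, not part of the original script):
#   python delete_dupes.py ./masters ./duplicates --dry-run
# lists the files in ./duplicates that also exist in ./masters; dropping
# --dry-run actually deletes them.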
|
mit
| -7,510,965,942,031,719,000
| 37.023256
| 120
| 0.625076
| false
| 3.802326
| false
| false
| false
|
motmot/flymovieformat
|
bootstrap.py
|
1
|
2578
|
##############################################################################
#
# Copyright (c) 2006 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id$
"""
import os, shutil, sys, tempfile, urllib2
tmpeggs = tempfile.mkdtemp()
is_jython = sys.platform.startswith("java")
try:
import pkg_resources
except ImportError:
ez = {}
exec(urllib2.urlopen("http://peak.telecommunity.com/dist/ez_setup.py").read(), ez)
ez["use_setuptools"](to_dir=tmpeggs, download_delay=0)
import pkg_resources
if sys.platform == "win32":
def quote(c):
if " " in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
def quote(c):
return c
cmd = "from setuptools.command.easy_install import main; main()"
ws = pkg_resources.working_set
if is_jython:
import subprocess
assert (
subprocess.Popen(
[sys.executable]
+ ["-c", quote(cmd), "-mqNxd", quote(tmpeggs), "zc.buildout"],
env=dict(
os.environ,
PYTHONPATH=ws.find(
pkg_resources.Requirement.parse("setuptools")
).location,
),
).wait()
== 0
)
else:
assert (
os.spawnle(
os.P_WAIT,
sys.executable,
quote(sys.executable),
"-c",
quote(cmd),
"-mqNxd",
quote(tmpeggs),
"zc.buildout",
dict(
os.environ,
PYTHONPATH=ws.find(
pkg_resources.Requirement.parse("setuptools")
).location,
),
)
== 0
)
ws.add_entry(tmpeggs)
ws.require("zc.buildout")
import zc.buildout.buildout
zc.buildout.buildout.main(sys.argv[1:] + ["bootstrap"])
shutil.rmtree(tmpeggs)
|
bsd-3-clause
| -4,471,996,207,131,079,000
| 25.040404
| 86
| 0.559348
| false
| 4.047096
| false
| false
| false
|
ruchee/vimrc
|
vimfiles/bundle/vim-python/submodules/pylint/pylint/checkers/refactoring/len_checker.py
|
1
|
4298
|
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
from typing import List
import astroid
from pylint import checkers, interfaces
from pylint.checkers import utils
class LenChecker(checkers.BaseChecker):
"""Checks for incorrect usage of len() inside conditions.
Pep8 states:
For sequences, (strings, lists, tuples), use the fact that empty sequences are false.
Yes: if not seq:
if seq:
No: if len(seq):
if not len(seq):
Problems detected:
* if len(sequence):
* if not len(sequence):
* elif len(sequence):
* elif not len(sequence):
* while len(sequence):
* while not len(sequence):
* assert len(sequence):
* assert not len(sequence):
* bool(len(sequence))
"""
__implements__ = (interfaces.IAstroidChecker,)
# configuration section name
name = "refactoring"
msgs = {
"C1801": (
"Do not use `len(SEQUENCE)` without comparison to determine if a sequence is empty",
"len-as-condition",
"Used when Pylint detects that len(sequence) is being used "
"without explicit comparison inside a condition to determine if a sequence is empty. "
"Instead of coercing the length to a boolean, either "
"rely on the fact that empty sequences are false or "
"compare the length against a scalar.",
)
}
priority = -2
options = ()
@utils.check_messages("len-as-condition")
def visit_call(self, node):
# a len(S) call is used inside a test condition
# could be if, while, assert or if expression statement
# e.g. `if len(S):`
if not utils.is_call_of_name(node, "len"):
return
# the len() call could also be nested together with other
# boolean operations, e.g. `if z or len(x):`
parent = node.parent
while isinstance(parent, astroid.BoolOp):
parent = parent.parent
# we're finally out of any nested boolean operations so check if
# this len() call is part of a test condition
if not utils.is_test_condition(node, parent):
return
len_arg = node.args[0]
generator_or_comprehension = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
if isinstance(len_arg, generator_or_comprehension):
# The node is a generator or comprehension as in len([x for x in ...])
self.add_message("len-as-condition", node=node)
return
try:
instance = next(len_arg.infer())
except astroid.InferenceError:
            # Probably undefined-variable, abort check
return
mother_classes = self.base_classes_of_node(instance)
affected_by_pep8 = any(
t in mother_classes for t in ["str", "tuple", "list", "set"]
)
if "range" in mother_classes or (
affected_by_pep8 and not self.instance_has_bool(instance)
):
self.add_message("len-as-condition", node=node)
@staticmethod
def instance_has_bool(class_def: astroid.ClassDef) -> bool:
try:
class_def.getattr("__bool__")
return True
except astroid.AttributeInferenceError:
...
return False
@utils.check_messages("len-as-condition")
def visit_unaryop(self, node):
"""`not len(S)` must become `not S` regardless if the parent block
is a test condition or something else (boolean expression)
e.g. `if not len(S):`"""
if (
isinstance(node, astroid.UnaryOp)
and node.op == "not"
and utils.is_call_of_name(node.operand, "len")
):
self.add_message("len-as-condition", node=node)
@staticmethod
def base_classes_of_node(instance: astroid.nodes.ClassDef) -> List[astroid.Name]:
"""Return all the classes names that a ClassDef inherit from including 'object'."""
try:
return [instance.name] + [x.name for x in instance.ancestors()]
except TypeError:
return [instance.name]
|
mit
| -7,659,394,172,842,702,000
| 34.520661
| 98
| 0.597953
| false
| 4.081671
| false
| false
| false
|
fanchunke1991/flask_website
|
migrations/versions/d8d1b418f41c_.py
|
1
|
1046
|
"""empty message
Revision ID: d8d1b418f41c
Revises: 7f5c9e993be1
Create Date: 2017-02-15 21:03:05.958000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd8d1b418f41c'
down_revision = '7f5c9e993be1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('profiles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('nickname', sa.String(length=10), nullable=False),
sa.Column('gender', sa.String(length=4), nullable=False),
sa.Column('address', sa.String(length=4), nullable=True),
sa.Column('discription', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('profiles')
# ### end Alembic commands ###
|
mit
| -7,483,645,544,517,779,000
| 27.27027
| 65
| 0.670172
| false
| 3.26875
| false
| false
| false
|
taedori81/gentlecoffee
|
home/templatetags/gentlecoffee_tags.py
|
1
|
2563
|
from django import template
from ..models import Area
register = template.Library()
@register.assignment_tag(takes_context=True)
def get_site_root(context):
return context['request'].site.root_page
@register.inclusion_tag("home/navbar/navbar.html", takes_context=True)
def display_navbar(context):
parent = get_site_root(context)
if context.has_key('self'):
calling_page = context['self']
else:
calling_page = None
menuitems = parent.get_children().live().in_menu()
for menuitem in menuitems:
menuitem.show_dropdown = menuitem.get_children().live().in_menu().exists()
menuitem.active = (calling_page.url.startswith(menuitem.url) if calling_page else False)
return {
"calling_page": calling_page,
"menuitems": menuitems,
"request": context['request']
}
@register.inclusion_tag('home/navbar/navbar_dropdown.html', takes_context=True)
def display_navbar_dropdown(context, parent):
menuitems_children = parent.get_children().live().in_menu()
return {
"parent": parent,
"menuitems_children": menuitems_children,
"request": context['request'],
}
@register.inclusion_tag('home/include/side_menu_area.html', takes_context=True)
def display_side_menu_area(context):
request = context['request']
areas = Area.objects.all()
    # TODO Need to build href to filter the page
area_items = []
for area in areas:
item_name = area.area_name
item_href = '?area=' + item_name
area_items.append({"name": item_name, "href": item_href})
return {
"request": request,
"areas": areas,
"area_items": area_items
}
@register.filter
def url_param_dict_to_list(url_items_dict):
"""Turn this dictionary into a param list for the URL"""
params_list = ""
for key,value in url_items_dict:
if key != "page":
params_list += "&%s=%s" % (key, value)
return params_list
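# Editorial example for the filter above, assuming it is fed request.GET.items()
# (i.e. an iterable of (key, value) pairs) in the template:
#   [('area', 'kenya'), ('page', '2')]  ->  "&area=kenya"
# The "page" key is deliberately skipped so pagination links can append their own.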
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.inclusion_tag('home/include/blog_item.html', takes_context=True)
def display_blog_list(context, blog_list):
blogs = []
for blog in blog_list:
for block in blog.body:
if block.block_type == 'heading':
blog.heading = block.value
if block.block_type == 'photo':
blog.photo = block.value
blogs.append(blog)
request = context['request']
return {
"request": request,
"blogs": blogs,
}
|
bsd-3-clause
| 5,064,137,035,026,292,000
| 26.265957
| 96
| 0.630121
| false
| 3.604782
| false
| false
| false
|
garethr/django-project-templates
|
setup.py
|
1
|
1174
|
from setuptools import setup, find_packages
import os
setup(
name='django-project-templates',
version = "0.11",
description="Paster templates for creating Django projects",
author='Gareth Rushgrove',
author_email='gareth@morethanseven.net',
url='http://github.com/garethr/django-project-templates/',
packages = find_packages('src'),
package_dir = {'':'src'},
license = "MIT",
keywords = "django paster",
install_requires=[
'setuptools',
'PasteScript>=1.3',
'Cheetah',
'fabric',
],
include_package_data=True,
zip_safe=False,
entry_points="""
[paste.paster_create_template]
django_project=django_project_templates.pastertemplates:DjangoProjectTemplate
django_cruisecontrol_project=django_project_templates.pastertemplates:DjangoCruiseControlTemplate
newsapps_project=django_project_templates.pastertemplates:NewsAppsProjectTemplate
""",
classifiers = [
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
|
mit
| -1,137,145,708,265,585,200
| 32.542857
| 105
| 0.66184
| false
| 4.020548
| false
| false
| false
|
migasfree/migasfree-backend
|
migasfree/client/models/fault.py
|
1
|
4720
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2021 Jose Antonio Chavarría <jachavar@gmail.com>
# Copyright (c) 2015-2021 Alberto Gacías <alberto@migasfree.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.db.models.aggregates import Count
from django.utils.translation import gettext_lazy as _
from ...core.models import Project
from .event import Event
from .fault_definition import FaultDefinition
class DomainFaultManager(models.Manager):
def get_queryset(self):
return super().get_queryset().select_related(
'project',
'fault_definition',
'computer',
'computer__project',
'computer__sync_user',
)
def scope(self, user):
qs = self.get_queryset()
if user and not user.is_view_all():
qs = qs.filter(
project_id__in=user.get_projects(),
computer_id__in=user.get_computers()
)
return qs
class UncheckedManager(DomainFaultManager):
def get_queryset(self):
return super().get_queryset().filter(checked=0)
def scope(self, user):
qs = super().scope(user).filter(checked=0)
if user:
qs = qs.filter(models.Q(fault_definition__users__id__in=[user.id, ])
| models.Q(fault_definition__users=None))
else:
qs = qs.filter(fault_definition__users=None)
return qs
class FaultManager(DomainFaultManager):
def create(self, computer, definition, result):
obj = Fault()
obj.computer = computer
obj.project = computer.project
obj.fault_definition = definition
obj.result = result
obj.save()
return obj
class Fault(Event):
USER_FILTER_CHOICES = (
('me', _('To check for me')),
('only_me', _('Assigned to me')),
('others', _('Assigned to others')),
('unassigned', _('Unassigned')),
)
fault_definition = models.ForeignKey(
FaultDefinition,
on_delete=models.CASCADE,
verbose_name=_("fault definition")
)
result = models.TextField(
verbose_name=_("result"),
null=True,
blank=True
)
checked = models.BooleanField(
verbose_name=_("checked"),
default=False,
)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
verbose_name=_("project")
)
objects = FaultManager()
unchecked = UncheckedManager()
@staticmethod
def unchecked_count(user=None):
queryset = Fault.unchecked.scope(user)
if user:
queryset = queryset.filter(
models.Q(fault_definition__users__id__in=[user.id, ])
| models.Q(fault_definition__users=None)
)
return queryset.count()
@staticmethod
def unchecked_by_project(user):
total = Fault.unchecked_count(user)
projects = list(Fault.unchecked.scope(user).values(
'project__name',
'project__id',
'project__platform__id',
).annotate(
count=Count('id')
).order_by('project__platform__id', '-count'))
platforms = list(Fault.unchecked.scope(user).values(
'project__platform__id',
'project__platform__name'
).annotate(
count=Count('id')
).order_by('project__platform__id', '-count'))
return {
'total': total,
'inner': platforms,
'outer': projects,
}
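        # Editorial note on the shape returned above (field values are made up):
        #   {'total': 7,
        #    'inner': [{'project__platform__id': 1,
        #               'project__platform__name': 'Linux', 'count': 7}],
        #    'outer': [{'project__name': 'Ubuntu', 'project__id': 3,
        #               'project__platform__id': 1, 'count': 7}]}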
@staticmethod
def group_by_definition(user=None):
return Fault.objects.scope(user).values(
'fault_definition__id', 'fault_definition__name'
).annotate(
count=models.aggregates.Count('fault_definition__id')
).order_by('-count')
def checked_ok(self):
self.checked = True
self.save()
def list_users(self):
return self.fault_definition.list_users()
class Meta:
app_label = 'client'
verbose_name = _('Fault')
verbose_name_plural = _('Faults')
|
gpl-3.0
| 4,239,233,166,619,057,000
| 27.421687
| 80
| 0.597075
| false
| 4.095486
| false
| false
| false
|
skurtapp/django-rest-framework-jwt
|
setup.py
|
1
|
3378
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import sys
from setuptools import setup
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
with open(os.path.join(package, '__init__.py'), 'rb') as init_py:
src = init_py.read().decode('utf-8')
return re.search("__version__ = ['\"]([^'\"]+)['\"]", src).group(1)
name = 'skurt-djangorestframework-jwt'
version = get_version('skurt_rest_framework_jwt')
package = 'skurt_rest_framework_jwt'
description = 'JSON Web Token based authentication for Django REST framework'
url = 'https://github.com/skurtapp/django-rest-framework-jwt'
author = 'Jose Padilla'
author_email = 'jpadilla@getblimp.com'
license = 'MIT'
install_requires = [
'PyJWT>=1.4.0,<2.0.0'
]
def read(*paths):
"""
Build a file path from paths and return the contents.
"""
with open(os.path.join(*paths), 'r') as f:
return f.read()
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
if sys.argv[-1] == 'publish':
if os.system('pip freeze | grep wheel'):
print('wheel not installed.\nUse `pip install wheel`.\nExiting.')
sys.exit()
if os.system('pip freeze | grep twine'):
print('twine not installed.\nUse `pip install twine`.\nExiting.')
sys.exit()
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
shutil.rmtree('dist')
shutil.rmtree('build')
shutil.rmtree('skurt_djangorestframework_jwt.egg-info')
print('You probably want to also tag the version now:')
print(" git tag -a {0} -m 'version {0}'".format(version))
print(' git push --tags')
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
long_description=read('README.rst'),
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
]
)
|
mit
| 6,180,108,985,916,996,000
| 29.160714
| 77
| 0.617229
| false
| 3.679739
| false
| false
| false
|
ResolveWang/WeiboSpider
|
db/dao.py
|
1
|
7194
|
from sqlalchemy import text
from sqlalchemy.exc import IntegrityError as SqlalchemyIntegrityError
from pymysql.err import IntegrityError as PymysqlIntegrityError
from sqlalchemy.exc import InvalidRequestError
from .basic import db_session
from .models import (
LoginInfo, KeywordsWbdata, KeyWords, SeedIds, UserRelation,
WeiboComment, WeiboRepost, User, WeiboData, WeiboPraise
)
from decorators import db_commit_decorator
class CommonOper:
@classmethod
@db_commit_decorator
def add_one(cls, data):
db_session.add(data)
db_session.commit()
@classmethod
@db_commit_decorator
def add_all(cls, datas):
try:
db_session.add_all(datas)
db_session.commit()
except (SqlalchemyIntegrityError, PymysqlIntegrityError, InvalidRequestError):
for data in datas:
cls.add_one(data)
class LoginInfoOper:
@classmethod
def get_login_info(cls):
return db_session.query(LoginInfo.name, LoginInfo.password, LoginInfo.enable). \
filter(text('enable=1')).all()
@classmethod
@db_commit_decorator
def freeze_account(cls, name, rs):
"""
:param name: login account
        :param rs: 0 stands for banned, 1 stands for normal, 2 stands for invalid name or password
:return:
"""
account = db_session.query(LoginInfo).filter(LoginInfo.name == name).first()
account.enable = rs
db_session.commit()
class KeywordsDataOper:
@classmethod
@db_commit_decorator
def insert_keyword_wbid(cls, keyword_id, wbid):
keyword_wbdata = KeywordsWbdata()
keyword_wbdata.wb_id = wbid
keyword_wbdata.keyword_id = keyword_id
db_session.add(keyword_wbdata)
db_session.commit()
class KeywordsOper:
@classmethod
def get_search_keywords(cls):
return db_session.query(KeyWords.keyword, KeyWords.id).filter(text('enable=1')).all()
@classmethod
@db_commit_decorator
def set_useless_keyword(cls, keyword):
search_word = db_session.query(KeyWords).filter(KeyWords.keyword == keyword).first()
search_word.enable = 0
db_session.commit()
class SeedidsOper:
@classmethod
def get_seed_ids(cls):
"""
        Get all user ids to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('is_crawled=0')).all()
@classmethod
def get_home_ids(cls):
"""
        Get all user ids whose home pages need to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('home_crawled=0')).all()
@classmethod
@db_commit_decorator
def set_seed_crawled(cls, uid, result):
"""
:param uid: user id that is crawled
:param result: crawling result, 1 stands for succeed, 2 stands for fail
:return: None
"""
seed = db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
if seed and seed.is_crawled == 0:
seed.is_crawled = result
else:
seed = SeedIds(uid=uid, is_crawled=result)
db_session.add(seed)
db_session.commit()
@classmethod
def get_seed_by_id(cls, uid):
return db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
@classmethod
@db_commit_decorator
def insert_seeds(cls, ids):
db_session.execute(SeedIds.__table__.insert().prefix_with('IGNORE'), [{'uid': i} for i in ids])
db_session.commit()
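        # Editorial note: prefix_with('IGNORE') makes SQLAlchemy emit
        # "INSERT IGNORE INTO <seed table> (uid) VALUES (%s)" on MySQL, so uids
        # that already exist are silently skipped instead of raising
        # IntegrityError for duplicates.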
@classmethod
@db_commit_decorator
def set_seed_other_crawled(cls, uid):
"""
update it if user id already exists, else insert
:param uid: user id
:return: None
"""
seed = cls.get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=1, other_crawled=1, home_crawled=1)
db_session.add(seed)
else:
seed.other_crawled = 1
db_session.commit()
@classmethod
@db_commit_decorator
def set_seed_home_crawled(cls, uid):
"""
:param uid: user id
:return: None
"""
seed = cls.get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=0, other_crawled=0, home_crawled=1)
db_session.add(seed)
else:
seed.home_crawled = 1
db_session.commit()
class UserOper(CommonOper):
@classmethod
def get_user_by_uid(cls, uid):
return db_session.query(User).filter(User.uid == uid).first()
@classmethod
def get_user_by_name(cls,user_name):
return db_session.query(User).filter(User.name == user_name).first()
class UserRelationOper(CommonOper):
@classmethod
def get_user_by_uid(cls, uid, other_id, type):
user = db_session.query(UserRelation).filter_by(user_id = uid, follow_or_fans_id = other_id).first()
if user:
return True
else:
return False
class WbDataOper(CommonOper):
@classmethod
def get_wb_by_mid(cls, mid):
return db_session.query(WeiboData).filter(WeiboData.weibo_id == mid).first()
@classmethod
def get_weibo_comment_not_crawled(cls):
return db_session.query(WeiboData.weibo_id).filter(text('comment_crawled=0')).all()
@classmethod
def get_weibo_praise_not_crawled(cls):
return db_session.query(WeiboData.weibo_id).filter(text('praise_crawled=0')).all()
@classmethod
def get_weibo_repost_not_crawled(cls):
return db_session.query(WeiboData.weibo_id, WeiboData.uid).filter(text('repost_crawled=0')).all()
@classmethod
def get_weibo_dialogue_not_crawled(cls):
return db_session.query(WeiboData.weibo_id).filter(text('dialogue_crawled=0')).all()
@classmethod
@db_commit_decorator
def set_weibo_comment_crawled(cls, mid):
data = cls.get_wb_by_mid(mid)
if data:
data.comment_crawled = 1
db_session.commit()
@classmethod
@db_commit_decorator
def set_weibo_praise_crawled(cls, mid):
data = cls.get_wb_by_mid(mid)
if data:
data.praise_crawled = 1
db_session.commit()
@classmethod
@db_commit_decorator
def set_weibo_repost_crawled(cls, mid):
data = cls.get_wb_by_mid(mid)
if data:
data.repost_crawled = 1
db_session.commit()
@classmethod
@db_commit_decorator
def set_weibo_dialogue_crawled(cls, mid):
data = cls.get_wb_by_mid(mid)
if data:
data.dialogue_crawled = 1
db_session.commit()
class CommentOper(CommonOper):
@classmethod
def get_comment_by_id(cls, cid):
return db_session.query(WeiboComment).filter(WeiboComment.comment_id == cid).first()
class PraiseOper(CommonOper):
@classmethod
def get_Praise_by_id(cls, pid):
return db_session.query(WeiboPraise).filter(WeiboPraise.weibo_id == pid).first()
class RepostOper(CommonOper):
@classmethod
def get_repost_by_rid(cls, rid):
return db_session.query(WeiboRepost).filter(WeiboRepost.weibo_id == rid).first()
|
mit
| 6,026,674,423,055,466,000
| 29.210084
| 108
| 0.624478
| false
| 3.4173
| false
| false
| false
|
korrosivesec/crits
|
crits/emails/handlers.py
|
1
|
65619
|
from __future__ import absolute_import
import datetime
import email as eml
from email.parser import Parser
from email.utils import parseaddr, getaddresses, mktime_tz, parsedate_tz
import hashlib
import json
import magic
import re
import yaml
import io
import sys
import olefile
from dateutil.parser import parse as date_parser
from django.conf import settings
from crits.core.forms import DownloadFileForm
from crits.emails.forms import EmailYAMLForm
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from crits.campaigns.forms import CampaignForm
from crits.config.config import CRITsConfig
from crits.core.crits_mongoengine import json_handler, create_embedded_source
from crits.core.crits_mongoengine import EmbeddedCampaign
from crits.core.data_tools import clean_dict
from crits.core.exceptions import ZipFileError
from crits.core.handlers import class_from_id
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import user_sources, is_admin, is_user_favorite
from crits.core.user_tools import is_user_subscribed
from crits.domains.handlers import get_valid_root_domain
from crits.emails.email import Email
from crits.indicators.handlers import handle_indicator_ind
from crits.indicators.indicator import Indicator
from crits.notifications.handlers import remove_user_from_notification
from crits.samples.handlers import handle_file, handle_uploaded_file, mail_sample
from crits.services.handlers import run_triage
from crits.vocabulary.relationships import RelationshipTypes
from crits.vocabulary.indicators import (
IndicatorTypes,
IndicatorAttackTypes,
IndicatorThreatTypes
)
def create_email_field_dict(field_name,
field_type,
field_value,
field_displayed_text,
is_allow_create_indicator,
is_href,
is_editable,
is_email_list,
is_splunk,
href_search_field=None):
"""
Generates a 1:1 dictionary from all of the input fields.
Returns:
A dictionary of all the input fields, with the input parameter names
each as a key and its associated value as the value pair.
"""
return {"field_name": field_name,
"field_type": field_type,
"field_value": field_value,
"field_displayed_text": field_displayed_text,
"is_allow_create_indicator": is_allow_create_indicator,
"is_href": is_href,
"is_editable": is_editable,
"is_email_list": is_email_list,
"is_splunk": is_splunk,
"href_search_field": href_search_field
}
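# Editorial example for the helper above (all values are made up): the call
#   create_email_field_dict("subject", IndicatorTypes.EMAIL_SUBJECT, "Invoice",
#                           "Subject", True, True, True, False, False,
#                           href_search_field="subject")
# simply returns those arguments keyed by their parameter names.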
def generate_email_csv(request):
"""
Generate a CSV file of the Email information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request, Email)
return response
def get_email_formatted(email_id, analyst, data_format):
"""
Format an email in YAML or JSON.
:param email_id: The ObjectId of the email.
:type email_id: str
:param analyst: The user requesting the data.
:type analyst: str
:param data_format: The format you want the data in.
:type data_format: "json" or "yaml"
:returns: :class:`django.http.HttpResponse`
"""
sources = user_sources(analyst)
email = Email.objects(id=email_id, source__name__in=sources).first()
if not email:
return HttpResponse(json.dumps({}), content_type="application/json")
exclude = [
"created",
"source",
"relationships",
"schema_version",
"campaign",
"analysis",
"bucket_list",
"ticket",
"releasability",
"unsupported_attrs",
"status",
"objects",
"modified",
"analyst",
"_id",
"to",
"cc",
"raw_headers",
]
if data_format == "yaml":
data = {"email_yaml": email.to_yaml(exclude=exclude)}
elif data_format == "json":
data = {"email_yaml": email.to_json(exclude=exclude)}
else:
data = {"email_yaml": {}}
return HttpResponse(json.dumps(data), content_type="application/json")
def get_email_detail(email_id, analyst):
"""
Generate the email details page.
:param email_id: The ObjectId of the email.
:type email_id: str
:param analyst: The user requesting the data.
:type analyst: str
:returns: tuple
"""
template = None
sources = user_sources(analyst)
email = Email.objects(id=email_id, source__name__in=sources).first()
if not email:
template = "error.html"
args = {'error': "ID does not exist or insufficient privs for source"}
else:
email.sanitize(username="%s" % analyst, sources=sources)
update_data_form = EmailYAMLForm(analyst)
campaign_form = CampaignForm()
download_form = DownloadFileForm(initial={"obj_type": 'Email',
"obj_id":email_id})
# remove pending notifications for user
remove_user_from_notification("%s" % analyst, email.id, 'Email')
# subscription
subscription = {
'type': 'Email',
'id': email.id,
'subscribed': is_user_subscribed("%s" % analyst, 'Email',
email.id),
}
# objects
objects = email.sort_objects()
# relationships
relationships = email.sort_relationships("%s" % analyst, meta=True)
# relationship
relationship = {
'type': 'Email',
'value': email.id
}
# comments
comments = {'comments': email.get_comments(),
'url_key': email.id}
#screenshots
screenshots = email.get_screenshots(analyst)
# favorites
favorite = is_user_favorite("%s" % analyst, 'Email', email.id)
email_fields = []
email_fields.append(create_email_field_dict(
"from_address", # field_name
IndicatorTypes.EMAIL_FROM, # field_type
email.from_address, # field_value
"From", # field_displayed_text
# is_allow_create_indicator
# is_href
# is_editable
# is_email_list
# is_splunk
True, True, True, False, True,
href_search_field="from" # href_search_field
))
email_fields.append(create_email_field_dict(
"sender",
IndicatorTypes.EMAIL_SENDER,
email.sender,
"Sender",
True, True, True, False, True,
href_search_field="sender"
))
email_fields.append(create_email_field_dict(
"Email To",
None,
email.to,
"To",
False, True, True, True, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"cc",
"Email CC",
email.cc,
"CC",
False, True, True, True, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"date",
"Email Date",
email.date,
"Date",
False, False, True, False, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"isodate",
"Email ISODate",
email.isodate,
"ISODate",
False, False, False, False, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"subject",
IndicatorTypes.EMAIL_SUBJECT,
email.subject,
"Subject",
True, True, True, False, False,
href_search_field="subject"
))
email_fields.append(create_email_field_dict(
"x_mailer",
IndicatorTypes.EMAIL_X_MAILER,
email.x_mailer,
"X-Mailer",
True, True, True, False, False,
href_search_field="x_mailer"
))
email_fields.append(create_email_field_dict(
"reply_to",
IndicatorTypes.EMAIL_REPLY_TO,
email.reply_to,
"Reply To",
True, True, True, False, False,
href_search_field="reply_to"
))
email_fields.append(create_email_field_dict(
"message_id",
IndicatorTypes.EMAIL_MESSAGE_ID,
email.message_id,
"Message ID",
True, False, True, False, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"helo",
IndicatorTypes.EMAIL_HELO,
email.helo,
"helo",
True, True, True, False, False,
href_search_field="helo"
))
email_fields.append(create_email_field_dict(
"boundary",
IndicatorTypes.EMAIL_BOUNDARY,
email.boundary,
"Boundary",
True, False, True, False, False,
href_search_field=None
))
email_fields.append(create_email_field_dict(
"originating_ip",
IndicatorTypes.EMAIL_ORIGINATING_IP,
email.originating_ip,
"Originating IP",
True, True, True, False, True,
href_search_field="originating_ip"
))
email_fields.append(create_email_field_dict(
"x_originating_ip",
IndicatorTypes.EMAIL_X_ORIGINATING_IP,
email.x_originating_ip,
"X-Originating IP",
True, True, True, False, True,
href_search_field="x_originating_ip"
))
# analysis results
service_results = email.get_analysis_results()
args = {'objects': objects,
'email_fields': email_fields,
'relationships': relationships,
'comments': comments,
'favorite': favorite,
'relationship': relationship,
'screenshots': screenshots,
'subscription': subscription,
'email': email,
'campaign_form': campaign_form,
'download_form': download_form,
'update_data_form': update_data_form,
'admin': is_admin(analyst),
'service_results': service_results,
'rt_url': settings.RT_URL}
return template, args
def generate_email_jtable(request, option):
"""
Generate email jtable.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Email
type_ = "email"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
if 'Records' in response:
for doc in response['Records']:
if doc['to']:
doc['recip'] = len(doc['to'].split(','))
else:
doc['recip'] = 0
if doc['cc']:
doc['recip'] += len(doc['cc'].split(','))
return HttpResponse(json.dumps(response, default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type, request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Emails",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_), args=('jtlist',)),
'deleteurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_), args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts, request)
jtable['toolbar'] = [
{
'tooltip': "'All Emails'",
'text': "'All'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Emails'",
'text': "'New'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Emails'",
'text': "'In Progress'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Emails'",
'text': "'Analyzed'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Emails'",
'text': "'Deprecated'",
'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Add Email'",
'text': "'Add Email'",
'click': "function () {$('#new-email-fields').click()}",
},
{
'tooltip': "'Upload Outlook Email'",
'text': "'Upload .msg'",
'click': "function () {$('#new-email-outlook').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def handle_email_fields(data, analyst, method):
"""
Take email fields and convert them into an email object.
:param data: The fields to include in the email.
:type data: dict
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:returns: dict with keys:
"status" (boolean),
"object" The email object if successful,
"reason" (str).
"""
result = {
'status': False,
'reason': "",
'object': None,
'data': None
}
# Date and source are the only required ones.
# If there is no campaign confidence, default it to low.
# Remove these items from data so they are not added when merged.
sourcename = data.get('source', None)
del data['source']
if data.get('source_method', None):
method = method + " - " + data.get('source_method', None)
try:
del data['source_method']
except:
pass
reference = data.get('source_reference', None)
try:
del data['source_reference']
except:
pass
bucket_list = data.get('bucket_list', None)
try:
del data['bucket_list']
except:
pass
ticket = data.get('ticket', None)
try:
del data['ticket']
except:
pass
campaign = data.get('campaign', None)
try:
del data['campaign']
except:
pass
confidence = data.get('campaign_confidence', 'low')
try:
del data['campaign_confidence']
except:
pass
try:
for x in ('cc', 'to'):
y = data.get(x, None)
if isinstance(y, basestring):
if len(y) > 0:
tmp_y = y.split(',')
y_final = [ty.strip() for ty in tmp_y if len(ty.strip()) > 0]
data[x] = y_final
else:
data[x] = []
elif not y:
data[x] = []
except:
pass
new_email = Email()
new_email.merge(data)
if bucket_list:
new_email.add_bucket_list(bucket_list, analyst)
if ticket:
new_email.add_ticket(ticket, analyst)
new_email.source = [create_embedded_source(sourcename,
reference=reference,
method=method,
analyst=analyst)]
if campaign:
ec = EmbeddedCampaign(name=campaign,
confidence=confidence,
description="",
analyst=analyst,
date=datetime.datetime.now())
new_email.add_campaign(ec)
try:
new_email.save(username=analyst)
new_email.reload()
run_triage(new_email, analyst)
result['object'] = new_email
result['status'] = True
except Exception, e:
result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
return result
def handle_json(data, sourcename, reference, analyst, method,
save_unsupported=True, campaign=None, confidence=None,
bucket_list=None, ticket=None):
"""
Take email in JSON and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param save_unsupported: Save any unsupported fields instead of ignoring.
:type save_unsupported: boolean
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"object" The email object if successful,
"data" the converted email data.
"reason" (str).
"""
result = {
'status': False,
'reason': "",
'object': None,
'data': None
}
try:
converted = json.loads(data)
if isinstance(converted, dict) == False:
raise
except Exception, e:
result["reason"] = "Cannot convert data to JSON.\n<br /><pre>%s</pre>" % str(e)
return result
result['data'] = converted
new_email = dict_to_email(result['data'], save_unsupported=save_unsupported)
if bucket_list:
new_email.add_bucket_list(bucket_list, analyst)
if ticket:
new_email.add_ticket(ticket, analyst)
if campaign:
if not confidence:
confidence = "low"
ec = EmbeddedCampaign(name=campaign,
confidence=confidence,
description="",
analyst=analyst,
date=datetime.datetime.now())
new_email.add_campaign(ec)
result['object'] = new_email
result['object'].source = [create_embedded_source(sourcename,
reference=reference,
method=method,
analyst=analyst)]
try:
result['object'].save(username=analyst)
result['object'].reload()
run_triage(result['object'], analyst)
except Exception, e:
result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
result['status'] = True
return result
# if email_id is provided it is the existing email id to modify.
def handle_yaml(data, sourcename, reference, analyst, method, email_id=None,
save_unsupported=True, campaign=None, confidence=None,
bucket_list=None, ticket=None):
"""
Take email in YAML and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param email_id: The ObjectId of the existing email to update.
:type email_id: str
:param save_unsupported: Save any unsupported fields instead of ignoring.
:type save_unsupported: boolean
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"object" The email object if successful,
"data" the converted email data.
"reason" (str).
"""
result = {
'status': False,
'reason': "",
'object': None,
'data': None
}
try:
converted = yaml.load(data)
if isinstance(converted, dict) == False:
raise
except Exception, e:
result["reason"] = "Cannot convert data to YAML.\n<br /><pre>%s</pre>" % str(e)
return result
result['data'] = converted
new_email = dict_to_email(result['data'], save_unsupported=save_unsupported)
if bucket_list:
new_email.add_bucket_list(bucket_list, analyst)
if ticket:
new_email.add_ticket(ticket, analyst)
if campaign:
if not confidence:
confidence = "low"
ec = EmbeddedCampaign(name=campaign,
confidence=confidence,
description="",
analyst=analyst,
date=datetime.datetime.now())
new_email.add_campaign(ec)
result['object'] = new_email
if email_id:
old_email = class_from_id('Email', email_id)
if not old_email:
result['reason'] = "Unknown email_id."
return result
# Can not merge with a source?
# For now, just save the original source and put it back after merge.
saved_source = old_email.source
# XXX: If you use the "Edit YAML" button and edit the "from" field
# it gets put into the new email object in dict_to_email() correctly
# but calling to_dict() on that object results in a 'from' key being
# put into the dictionary. Thus, the following line will result in
# your new 'from' field being stuffed into unsupported_attrs.
# old_email.merge(result['object'].to_dict(), True)
# To work around this (for now) convert the new email object to a
# dictionary and manually replace 'from' with the from_address
# property.
tmp = result['object'].to_dict()
if 'from' in tmp:
tmp['from_address'] = result['object'].from_address
old_email.merge(tmp, True)
old_email.source = saved_source
try:
old_email.save(username=analyst)
except Exception, e:
result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
return result
else:
result['object'].source = [create_embedded_source(sourcename,
reference=reference,
method=method,
analyst=analyst)]
try:
result['object'].save(username=analyst)
result['object'].reload()
run_triage(result['object'], analyst)
except Exception, e:
result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
return result
result['status'] = True
return result
def handle_msg(data, sourcename, reference, analyst, method, password='',
campaign=None, confidence=None, bucket_list=None, ticket=None):
"""
Take email in MSG and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param password: The password for the attachment.
:type password: str
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"obj_id" The email ObjectId if successful,
"message" (str)
"reason" (str).
"""
response = {'status': False}
result = parse_ole_file(data)
if result.has_key('error'):
response['reason'] = result['error']
return response
result['email']['source'] = sourcename
result['email']['source_reference'] = reference
result['email']['campaign'] = campaign
result['email']['campaign_confidence'] = confidence
result['email']['bucket_list'] = bucket_list
result['email']['ticket'] = ticket
if result['email'].has_key('date'):
result['email']['isodate'] = date_parser(result['email']['date'],
fuzzy=True)
obj = handle_email_fields(result['email'], analyst, method)
if not obj["status"]:
response['reason'] = obj['reason']
return response
email = obj.get('object')
# Process attachments and upload as samples
attach_messages = []
for file in result['attachments']:
type_ = file.get('type', '')
if 'pkcs7' not in type_:
mimetype = magic.from_buffer(file.get('data', ''), mime=True)
if mimetype is None:
file_format = 'raw'
elif 'application/zip' in mimetype:
file_format = 'zip'
elif 'application/x-rar' in mimetype:
file_format = 'rar'
else:
file_format = 'raw'
try:
cleaned_data = {'file_format': file_format,
'password': password}
r = create_email_attachment(email, cleaned_data, analyst, sourcename,
method, reference, campaign, confidence,
"", "", file.get('data', ''), file.get('name', ''))
if 'success' in r:
if not r['success']:
attach_messages.append("%s: %s" % (file.get('name', ''),
r['message']))
else:
attach_messages.append("%s: Added Successfully!" % file.get('name', ''))
except BaseException:
error_message = 'The email uploaded successfully, but there was an error\
uploading the attachment ' + file['name'] + '\n\n' + str(sys.exc_info())
response['reason'] = error_message
return response
else:
attach_messages.append('%s: Cannot decrypt attachment (pkcs7).' % file.get('name', ''))
if len(attach_messages):
response['message'] = '<br/>'.join(attach_messages)
response['status'] = True
response['obj_id'] = obj['object'].id
return response
def handle_pasted_eml(data, sourcename, reference, analyst, method,
parent_type=None, parent_id=None, campaign=None,
confidence=None, bucket_list=None, ticket=None):
"""
Take email in EML and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param parent_type: The top-level object type of the parent.
:type parent_type: str
:param parent_id: The ObjectId of the parent.
:type parent_id: str
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"reason" (str),
"object" The email object if successful,
"data" the converted email data,
"attachments" (dict).
"""
# Try to fix headers where we lost whitespace indents
# Split by newline, parse/fix headers, join by newline
hfieldre = re.compile('^\S+:\s')
boundaryre = re.compile('boundary="?([^\s"\']+)"?')
emldata = []
boundary = None
isbody = False
if not isinstance(data, str):
data = data.read()
for line in data.split("\n"):
# We match the regex for a boundary definition
m = boundaryre.search(line)
if m:
boundary = m.group(1)
# content boundary exists and we reached it
if boundary and boundary in line:
isbody = True
        # If we are not in the body and see something that does not look
# like a valid header field, prepend a space to attach this line
# to the previous header we found
if not isbody and not hfieldre.match(line):
line = " %s" % line
emldata.append(line)
emldata = "\n".join(emldata)
return handle_eml(emldata, sourcename, reference, analyst, method, parent_type,
parent_id, campaign, confidence, bucket_list, ticket)
def handle_eml(data, sourcename, reference, analyst, method, parent_type=None,
parent_id=None, campaign=None, confidence=None, bucket_list=None,
ticket=None):
"""
Take email in EML and convert them into an email object.
:param data: The data for the email.
:type data: dict
:param sourcename: The name of the source providing this email.
:type sourcename: str
:param reference: The reference to the data from the source.
:type reference: str
:param analyst: The user creating this email object.
:type analyst: str
:param method: The method of acquiring this email.
:type method: str
:param parent_type: The top-level object type of the parent.
:type parent_type: str
:param parent_id: The ObjectId of the parent.
:type parent_id: str
:param campaign: The campaign to attribute to this email.
:type campaign: str
:param confidence: Confidence level of the campaign.
:type confidence: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:returns: dict with keys:
"status" (boolean),
"reason" (str),
"object" The email object if successful,
"data" the converted email data,
"attachments" (dict).
"""
result = {
'status': False,
'reason': "",
'object': None,
'data': None,
'attachments': {}
}
if not sourcename:
result['reason'] = "Missing source information."
return result
msg_import = {'raw_header': ''}
reImap = re.compile(r"(\*\s\d+\sFETCH\s.+?\r\n)(.+)\).*?OK\s(UID\sFETCH\scompleted|Success)", re.M | re.S)
# search for SMTP dialog
start = data.find("DATA")
end = data.find("\x0d\x0a\x2e\x0d\x0a")
if start >= 0 and end >= 0:
premail = data[:start]
mailfrom = None
rcptto = None
for preheaders in premail.splitlines():
mfpos = preheaders.find("MAIL FROM")
if mfpos > -1:
try:
mailfrom = unicode(preheaders[mfpos + 10:])
except UnicodeDecodeError:
mailfrom = unicode(preheaders[mfpos + 10:], errors="replace")
rcpos = preheaders.find("RCPT TO")
if rcpos > -1:
try:
rcptto = unicode(preheaders[rcpos + 9:])
except UnicodeDecodeError:
rcptto = unicode(preheaders[rcpos + 9:], errors="replace")
if mailfrom:
msg_import['mailfrom'] = mailfrom
if rcptto:
msg_import['rcptto'] = rcptto
mail1 = data[start + 6:end]
stripped_mail = ""
for line in mail1.splitlines(True):
# Strip SMTP response codes. Some people like to grab a single
# TCP session in wireshark and save it to disk and call it an EML.
if line[:4] in ['200 ', '211 ', '214 ', '220 ', '221 ', '250 ',
'250-', '251 ', '354 ', '421 ', '450 ', '451 ',
'452 ', '500 ', '501 ', '502 ', '503 ', '504 ',
'521 ', '530 ', '550 ', '551 ', '552 ', '553 ',
'554 ']:
continue
stripped_mail += line
else:
# No SMTP dialog found, search for IMAP markers
match = reImap.search(data)
if match:
stripped_mail = match.groups()[1]
else:
stripped_mail = data
msg = eml.message_from_string(str(stripped_mail))
if not msg.items():
result['reason'] = """Could not parse email. Possibly the input does
not conform to a Internet Message style headers
and header continuation lines..."""
return result
# clean up headers
for d in msg.items():
cleand = ''.join([x for x in d[1] if (ord(x) < 127 and ord(x) >= 32)])
msg_import[d[0].replace(".",
"").replace("$",
"").replace("\x00",
"").replace("-",
"_").lower()] = cleand
msg_import['raw_header'] += d[0] + ": " + cleand + "\n"
# Rip out anything that looks like an email address and store it.
if 'to' in msg_import:
to_list = re.findall(r'[\w\-][\w\-\.]+@[\w\-][\w\-\.]+[a-zA-Z]{1,4}',
msg_import['to'])
msg_import['to'] = []
msg_import['to'] = [i for i in to_list if i not in msg_import['to']]
# Parse the body of the email
msg_import["raw_body"] = ""
for part in msg.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get_content_maintype() == "text":
content = part.get_payload(decode=True)
if content:
try:
message_part = unicode(content)
except UnicodeDecodeError:
message_part = unicode(content, errors="replace")
msg_import["raw_body"] = msg_import["raw_body"] + \
message_part + "\n"
# Check for attachment in mail parts
filename = part.get_filename()
attach = part.get_payload(decode=True)
if attach is not None and len(attach):
md5 = hashlib.md5(attach).hexdigest()
mtype = magic.from_buffer(attach)
if filename is not None:
try:
filename = unicode(filename)
except UnicodeDecodeError:
filename = unicode(filename, errors="replace")
else:
filename = md5
result['attachments'][md5] = {
'filename': filename,
'magic': mtype,
'blob': attach
}
result['data'] = msg_import
new_email = dict_to_email(result['data'])
if bucket_list:
new_email.add_bucket_list(bucket_list, analyst)
if ticket:
new_email.add_ticket(ticket, analyst)
if campaign:
if not confidence:
confidence = "low"
ec = EmbeddedCampaign(name=campaign,
confidence=confidence,
description="",
analyst=analyst,
date=datetime.datetime.now())
new_email.add_campaign(ec)
result['object'] = new_email
result['object'].source = [create_embedded_source(sourcename,
reference=reference,
method=method,
analyst=analyst)]
# Save the Email first, so we can have the id to use to create
# relationships.
if not result['object'].date:
result['object'].date = None
try:
result['object'].save(username=analyst)
result['object'].reload()
run_triage(result['object'], analyst)
except Exception, e:
result['reason'] = "Failed1 to save email.\n<br /><pre>" + \
str(e) + "</pre>"
return result
# Relate the email back to the pcap, if it came from PCAP.
if parent_id and parent_type:
rel_item = class_from_id(parent_type, parent_id)
if rel_item:
rel_type = RelationshipTypes.CONTAINED_WITHIN
ret = result['object'].add_relationship(rel_item,
rel_type,
analyst=analyst,
get_rels=False)
if not ret['success']:
result['reason'] = "Failed to create relationship.\n<br /><pre>"
+ result['message'] + "</pre>"
return result
# Save the email again since it now has a new relationship.
try:
result['object'].save(username=analyst)
except Exception, e:
result['reason'] = "Failed to save email.\n<br /><pre>"
+ str(e) + "</pre>"
return result
for (md5_, attachment) in result['attachments'].items():
if handle_file(attachment['filename'],
attachment['blob'],
sourcename,
method='eml_processor',
reference=reference,
related_id=result['object'].id,
user=analyst,
md5_digest=md5_,
related_type='Email',
campaign=campaign,
confidence=confidence,
bucket_list=bucket_list,
ticket=ticket,
relationship=RelationshipTypes.CONTAINED_WITHIN) == None:
result['reason'] = "Failed to save attachment.\n<br /><pre>"
+ md5_ + "</pre>"
return result
result['status'] = True
return result
def dict_to_email(d, save_unsupported=True):
"""
Convert a dictionary to an email.
Standardize all key names:
- Convert hyphens and whitespace to underscores
- Remove all non-alphanumeric and non-underscore characters.
- Combine multiple underscores.
        - Convert alpha characters to lowercase.
:param d: The dictionary to convert.
:type d: dict
:param save_unsupported: Whether or not to save unsupported fields.
:type save_unsupported: boolean
:returns: :class:`crits.email.email.Email`
"""
for key in d:
newkey = re.sub('[\s-]', '_', key)
newkey = re.sub('[\W]', '', newkey)
newkey = re.sub('_+', '_', newkey)
newkey = newkey.lower()
if key != newkey:
d[newkey] = d[key]
del d[key]
# Remove keys which we don't want the user to modify via YAML.
keys = ('schema_version', 'comments', 'objects', 'campaign',
'relationships', 'source', 'releasability', 'analysis',
'bucket_list', 'ticket', 'objects')
clean_dict(d, keys)
if 'x_originating_ip' in d and d['x_originating_ip']:
d['x_originating_ip'] = re.findall(r'[0-9]+(?:\.[0-9]+){3}',
d['x_originating_ip'])[0]
if 'date' in d and d['date']:
if isinstance(d['date'], datetime.datetime):
d['isodate'] = d['date']
d['date'] = str(d['date'])
else:
d['isodate'] = date_parser(d['date'], fuzzy=True)
if 'to' in d and isinstance(d['to'], basestring) and len(d['to']) > 0:
d['to'] = [d['to']]
if 'cc' in d and isinstance(d['cc'], basestring) and len(d['cc']) > 0:
d['cc'] = [d['cc']]
if 'from' in d:
d['from_address'] = d['from']
del d['from']
if save_unsupported:
for (k, v) in d.get('unsupported_attrs', {}).items():
d[k] = v
if 'unsupported_attrs' in d:
del d['unsupported_attrs']
crits_email = Email()
crits_email.merge(d)
return crits_email
def update_email_header_value(email_id, type_, value, analyst):
"""
Update a header value for an email.
:param email_id: The ObjectId of the email to update.
:type email_id: str
:param type_: The header type.
:type type_: str
:param value: The header value.
:type value: str
:param analyst: The user updating the header field.
:type analyst: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"isodate" (datetime.datetime) if the header field was "date".
"""
if type_ in ('to', 'cc'):
bad_chars = "<>^&(){}[]!#$%=+;:'/\|?~`"
if any((bad_char in value) for bad_char in bad_chars):
return {'success': False, 'message': "Invalid characters in list"}
email = Email.objects(id=email_id).first()
if email:
try:
if type_ in ('to', 'cc'):
vlist = value.split(",")
vfinal = []
for v in vlist:
if len(v.strip()) > 0:
vfinal.append(v.strip())
value = vfinal
setattr(email, type_, value)
if type_ == 'date':
isodate = date_parser(value, fuzzy=True)
email.isodate = isodate
email.save(username=analyst)
if type_ == 'date':
result = {'success': True,
'message': "Successfully updated email",
'isodate': email.isodate.strftime("%Y-%m-%d %H:%M:%S.%f")}
elif type_ in ('to', 'cc'):
links = ""
for v in value:
# dirty ugly hack to "urlencode" the resulting URL
url = reverse('crits.targets.views.target_info',
args=[v]).replace('@', '%40')
links += '<a href="%s">%s</a>, ' % (url, v)
result = {'success': True,
'message': "Successfully updated email",
'links': links}
else:
result = {'success': True,
'message': "Successfully updated email"}
except Exception, e:
result = {'success': False, 'message': e}
else:
result = {'success': False, 'message': "Could not find email"}
return result
def create_indicator_from_header_field(email, header_field, ind_type,
analyst, request):
"""
Create an indicator out of the header field.
:param email: The email to get the header from.
:type email: :class:`crits.emails.email.Email`
:param header_field: The header type.
:type header_field: str
:param ind_type: The Indicator type to use.
:type ind_type: str
:param analyst: The user updating the header field.
:type analyst: str
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:returns: dict with keys:
"success" (boolean),
"message" (str),
"""
value = getattr(email, header_field)
# Check to make sure the "value" is valid
if value == None or value.strip() == "":
result = {
'success': False,
'message': "Can't create indicator from email field [" +
str(header_field) + "] with an empty value field",
}
return result
elif ind_type == None or ind_type.strip() == "":
result = {
'success': False,
'message': "Can't create indicator from email field " +
"with an empty type field",
}
return result
newindicator = handle_indicator_ind(value,
email.source,
ind_type,
threat_type=IndicatorThreatTypes.UNKNOWN,
attack_type=IndicatorAttackTypes.UNKNOWN,
analyst=analyst)
if newindicator.get('objectid'):
indicator = Indicator.objects(id=newindicator['objectid']).first()
results = email.add_relationship(indicator,
RelationshipTypes.RELATED_TO,
analyst=analyst,
get_rels=True)
if results['success']:
email.save(username=analyst)
relationship = {'type': 'Email', 'value': email.id}
message = render_to_string('relationships_listing_widget.html',
{'relationship': relationship,
'relationships': results['message']},
RequestContext(request))
result = {'success': True, 'message': message}
else:
result = {
'success': False,
'message': "Error adding relationship: %s" % results['message']
}
else:
result = {
'success': False,
'message': "Error adding relationship: Could not find email/indicator",
}
return result
def create_email_attachment(email, cleaned_data, analyst, source, method="Upload",
reference="", campaign=None, confidence='low',
bucket_list=None, ticket=None, filedata=None,
filename=None, md5=None, email_addr=None, inherit_sources=False):
"""
Create an attachment for an email.
:param email: The email to use.
:type email: :class:`crits.emails.email.Email`
:param cleaned_data: Cleaned form information about the email.
:type cleaned_data: dict
:param analyst: The user creating this attachment.
:type analyst: str
:param source: The name of the source.
:type source: str
:param method: The method for this file upload.
:type method: str
:param reference: The source reference.
:type reference: str
:param campaign: The campaign to attribute to this attachment.
:type campaign: str
:param confidence: The campaign confidence.
:type confidence: str
:param bucket_list: The list of buckets to assign to this attachment.
:type bucket_list: str
:param ticket: The ticket to assign to this attachment.
:type ticket: str
:param filedata: The attachment.
:type filedata: request file data.
:param filename: The name of the file.
:type filename: str
:param md5: The MD5 of the file.
:type md5: str
:param email_addr: Email address to which to email the sample
:type email_addr: str
:param inherit_sources: 'True' if attachment should inherit Email's Source(s)
:type inherit_sources: bool
:returns: dict with keys "success" (boolean) and "message" (str).
"""
response = {'success': False,
'message': 'Unknown error; unable to upload file.'}
if filename:
filename = filename.strip()
# If selected, new sample inherits the campaigns of the related email.
if cleaned_data.get('inherit_campaigns'):
if campaign:
email.campaign.append(EmbeddedCampaign(name=campaign, confidence=confidence, analyst=analyst))
campaign = email.campaign
inherited_source = email.source if inherit_sources else None
try:
if filedata:
result = handle_uploaded_file(filedata,
source,
method,
reference,
cleaned_data['file_format'],
cleaned_data['password'],
analyst,
campaign,
confidence,
related_id=email.id,
related_type='Email',
filename=filename,
bucket_list=bucket_list,
ticket=ticket,
inherited_source=inherited_source)
else:
if md5:
md5 = md5.strip().lower()
result = handle_uploaded_file(None,
source,
method,
reference,
cleaned_data['file_format'],
None,
analyst,
campaign,
confidence,
related_id=email.id,
related_type='Email',
filename=filename,
md5=md5,
bucket_list=bucket_list,
ticket=ticket,
inherited_source=inherited_source,
is_return_only_md5=False)
except ZipFileError, zfe:
return {'success': False, 'message': zfe.value}
else:
if len(result) > 1:
response = {'success': True, 'message': 'Files uploaded successfully. '}
elif len(result) == 1:
if not filedata:
response['success'] = result[0].get('success', False)
if(response['success'] == False):
response['message'] = result[0].get('message', response.get('message'))
else:
result = [result[0].get('object').md5]
response['message'] = 'File uploaded successfully. '
else:
response = {'success': True, 'message': 'Files uploaded successfully. '}
if not response['success']:
return response
else:
if email_addr:
for s in result:
email_errmsg = mail_sample(s, [email_addr])
if email_errmsg is not None:
response['success'] = False
msg = "<br>Error emailing sample %s: %s\n" % (s, email_errmsg)
response['message'] = response['message'] + msg
return response
def parse_ole_file(file):
"""
Parse an OLE2.0 file to obtain data inside an email including attachments.
References:
http://www.fileformat.info/format/outlookmsg/
http://www.decalage.info/en/python/olefileio
https://code.google.com/p/pyflag/source/browse/src/FileFormats/OLE2.py
http://cpansearch.perl.org/src/MVZ/Email-Outlook-Message-0.912/lib/Email/Outlook/Message.pm
"""
header = file.read(len(olefile.MAGIC))
# Verify the file is in OLE2 format first
if header != olefile.MAGIC:
return {'error': 'The upload file is not a valid Outlook file. It must be in OLE2 format (.msg)'}
msg = {'subject': '_0037',
'body': '_1000',
'header': '_007D',
'message_class': '_001A',
'recipient_email': '_39FE',
'attachment_name': '_3707',
'attachment_data': '_3701',
'attachment_type': '_370E',
}
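    # The stream suffixes above are MAPI property tags (best-effort mapping):
    # 0x0037 subject, 0x1000 body, 0x007D transport headers, 0x001A message
    # class, 0x39FE recipient SMTP address, 0x3707 attachment long filename,
    # 0x3701 attachment data, 0x370E attachment MIME tag.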
file.seek(0)
data = file.read()
msg_file = io.BytesIO(data)
ole = olefile.OleFileIO(msg_file)
# Helper function to grab data out of stream objects
def get_stream_data(entry):
stream = ole.openstream(entry)
data = stream.read()
stream.close()
return data
# Parse the OLE streams and get attachments, subject, body, headers, and class
# The email dict is what will be put into MongoDB for CRITs
attachments = {}
email = {}
email['to'] = []
for entry in ole.listdir():
if 'attach' in entry[0]:
# Attachments are keyed by directory entry in the stream
# e.g. '__attach_version1.0_#00000000'
if entry[0] not in attachments:
attachments[entry[0]] = {}
if msg['attachment_name'] in entry[-1]:
attachments[entry[0]].update({'name': get_stream_data(entry).decode('utf-16')})
if msg['attachment_data'] in entry[-1]:
attachments[entry[0]].update({'data': get_stream_data(entry)})
if msg['attachment_type'] in entry[-1]:
attachments[entry[0]].update({'type': get_stream_data(entry).decode('utf-16')})
else:
if msg['subject'] in entry[-1]:
email['subject'] = get_stream_data(entry).decode('utf-16')
if msg['body'] in entry[-1]:
email['raw_body'] = get_stream_data(entry).decode('utf-16')
if msg['header'] in entry[-1]:
email['raw_header'] = get_stream_data(entry).decode('utf-16')
if msg['recipient_email'] in entry[-1]:
email['to'].append(get_stream_data(entry).decode('utf-16').lower())
if msg['message_class'] in entry[-1]:
message_class = get_stream_data(entry).decode('utf-16').lower()
ole.close()
# Process headers to extract data
headers = Parser().parse(io.StringIO(email.get('raw_header', '')), headersonly=True)
email['from_address'] = headers.get('From', '')
email['reply_to'] = headers.get('Reply-To', '')
email['date'] = headers.get('Date', '')
email['message_id'] = headers.get('Message-ID', '')
email['x_mailer'] = headers.get('X-Mailer', '')
email['x_originating_ip'] = headers.get('X-Originating-IP', '')
email['sender'] = getaddresses(headers.get_all('Sender', '')) # getaddresses returns list [(name, email)]
# If no sender, set the email address found in From:
if not email['sender']:
email['sender'] = getaddresses(headers.get_all('From', ''))
if len(email['sender']) > 0:
email['sender'] = email['sender'][0][1]
else:
email['sender'] = ''
# Get list of recipients and add to email['to'] if not already there
# Some emails do not have a stream for recipients (_39FE)
to = headers.get_all('To', [])
cc = headers.get_all('CC', [])
resent_to = headers.get_all('Resent-To', [])
resent_cc = headers.get_all('Resent-CC', [])
recipients = getaddresses(to + cc + resent_to + resent_cc)
for r in recipients:
addr = r[1].lower()
# If BCC then addr could be blank or set to undisclosed-recipients:
if addr and addr not in email['to'] and not re.match(r'^undisclosed-recipients[:;]?(?::;)?$', addr):
email['to'].append(addr)
# Check for encrypted and signed messages. The body will be empty in this case
# Message classes: http://msdn.microsoft.com/en-us/library/ee200767%28v=exchg.80%29.aspx
if message_class == 'ipm.note.smime' and not email.has_key('raw_body'):
email['raw_body'] = '<ENCRYPTED>'
if message_class == 'ipm.note.smime.multipartsigned' and not email.has_key('raw_body'):
email['raw_body'] = '<DIGITALLY SIGNED: body in smime.p7m>'
# Parse Received headers to get Helo and X-Originating-IP
# This can be unreliable since Received headers can be reordered by gateways
# and the date may not be in sync between systems. This is best effort based
# on the date as it appears in the Received header. In some cases there is no
# Received header present
#
# Received: from __ by __ with __ id __ for __ ; date
#
# See helper functions _get_received_from, _get_received_by, _get_received_date
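    # Hypothetical example of a value the helpers below pick apart:
    #   from mail.example.com ([203.0.113.7]) by mx.example.org with ESMTP
    #       id abc123 for <victim@example.org>; Mon, 1 Jan 2018 00:00:00 +0000
    # _get_received_from keeps the part before "by ", _get_received_by the part
    # between "by " and "for ", _get_received_for the address before ";", and
    # _get_received_date the trailing timestamp after the last ";".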
current_datetime = datetime.datetime.now()
earliest_helo_date = current_datetime
earliest_ip_date = current_datetime
email['helo'] = ''
originating_ip = ''
last_from = ''
helo_for = ''
all_received = headers.get_all('Received')
crits_config = CRITsConfig.objects().first()
if crits_config:
email_domain = get_valid_root_domain(crits_config.crits_email.split('@')[-1])[0]
else:
email_domain = ''
if all_received:
for received in all_received:
received_from = _get_received_from(received).lower() # from __
received_by = _get_received_by(received).lower() # by __ with __ id __
received_for = _get_received_for(received).lower() # for <email>
date = _get_received_date(received) # date
try:
current_date = datetime.datetime.fromtimestamp(mktime_tz(parsedate_tz(date))) # rfc2822 -> Time -> Datetime
except:
# Exception will occur if the date is not in the Received header. This could be
# where the originating IP is. e.g. Received: from 11.12.13.14 by rms-us019 with HTTP
current_date = datetime.datetime.min
grp = re.search(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b', received_from)
if grp and not _is_reserved_ip(grp.group()) and ' localhost ' not in received_from:
if email_domain not in received_from and email_domain in received_by:
if(current_date < earliest_helo_date):
helo_for = parseaddr(received_for.strip())[1]
earliest_helo_date = current_date
email['helo'] = received_from
else:
last_from = received_from
if grp and not email['x_originating_ip'] and not _is_reserved_ip(grp.group()):
if current_date < earliest_ip_date:
earliest_ip_date = current_date
originating_ip = grp.group()
# If no proper Helo found, just use the last received_from without a reserved IP
if not email['helo']:
email['helo'] = last_from
# Set the extracted originating ip. If not found, then just use the IP from Helo
if not email['x_originating_ip']:
if originating_ip:
email['x_originating_ip'] = originating_ip
else:
grp = re.search(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b', email['helo'])
if grp:
email['x_originating_ip'] = grp.group()
# Add the email address found in Helo
if helo_for and '@' in helo_for:
if helo_for not in email['to']:
email['to'].append(helo_for)
# If no Helo date found, then try to use the Date field
if earliest_helo_date == current_datetime and email['date']:
earliest_helo_date = datetime.datetime.fromtimestamp(mktime_tz(parsedate_tz(email['date'])))
return {'email': email, 'attachments': attachments.values(), 'received_date': earliest_helo_date}
def _get_received_from(received_header):
"""
Helper function to grab the 'from' part of a Received email header.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
info = received_header.split('by ')
try:
return info[0]
except:
        return ''
def _get_received_by(received_header):
"""
Helper function to grab the 'by' part of a Received email header.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
info = received_header.split('by ')
try:
return info[-1].split('for ')[0]
except:
return ''
def _get_received_for(received_header):
"""
Helper function to grab the 'for' part of a Received email header
WARNING: If 'for' is not there, the entire Received header is returned.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
info = received_header.split('for ')
try:
return info[-1].split(';')[0]
except:
return ''
def _get_received_date(received_header):
"""
Helper function to grab the date part of a Received email header.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
date = received_header.split(';')
try:
return date[-1]
except:
        return ''
def _is_reserved_ip(ip):
"""
Simple test to detect if an IP is private or loopback. Does not check
validity of the address.
"""
grp = re.match(r'127.\d{1,3}.\d{1,3}.\d{1,3}', ip) # 127.0.0.0/8
if grp:
return True
grp = re.match(r'10.\d{1,3}.\d{1,3}.\d{1,3}', ip) # 10.0.0.0/8
if grp:
return True
grp = re.match(r'192.168.\d{1,3}.\d{1,3}', ip) # 192.168.0.0/16
if grp:
return True
grp = re.match(r'172.(1[6-9]|2[0-9]|3[0-1]).\d{1,3}.\d{1,3}', ip) # 172.16.0.0/12
if grp:
return True
# No matches
return False
|
mit
| 7,987,814,643,264,909,000
| 37.396138
| 123
| 0.529283
| false
| 4.244437
| false
| false
| false
|
tomsilver/nupic
|
nupic/regions/AnomalyRegion.py
|
1
|
2946
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Region for computing the anomaly score."""
import numpy
from nupic.algorithms import anomaly
from nupic.regions.PyRegion import PyRegion
class AnomalyRegion(PyRegion):
"""Region for computing the anomaly score."""
@classmethod
def getSpec(cls):
return {
"description": ("Region that computes anomaly scores from temporal "
"memory."),
"singleNodeOnly": True,
"inputs": {
"activeColumns": {
"description": "The currently active columns.",
"regionLevel": True,
"dataType": "Real32",
"count": 0,
"required": True,
"isDefaultInput": False,
"requireSplitterMap": False,
},
"predictedColumns": {
"description": "The currently predicted columns.",
"regionLevel": True,
"dataType": "Real32",
"count": 0,
"required": True,
"isDefaultInput": False,
"requireSplitterMap": False,
},
},
"outputs": {
"rawAnomalyScore": {
"description": "The raw anomaly score.",
"dataType": "Real32",
"count": 1,
"regionLevel": True,
"isDefaultOutput": True,
},
},
"parameters": {
},
"commands": {
},
}
def __init__(self, *args, **kwargs):
self.prevPredictedColumns = numpy.zeros([], dtype="float32")
def initialize(self, inputs, outputs):
pass
def compute(self, inputs, outputs):
activeColumns = inputs["activeColumns"].nonzero()[0]
rawAnomalyScore = anomaly.computeRawAnomalyScore(
activeColumns, self.prevPredictedColumns)
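    # computeRawAnomalyScore returns the fraction of currently active columns
    # that were not predicted at the previous step (0.0 = fully predicted,
    # 1.0 = fully surprising).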
self.prevPredictedColumns = inputs["predictedColumns"].nonzero()[0]
outputs["rawAnomalyScore"][0] = rawAnomalyScore
|
gpl-3.0
| 5,956,932,422,393,378,000
| 30.677419
| 76
| 0.569246
| false
| 4.743961
| false
| false
| false
|
rrice2004/LanSkorpian
|
LanSkorpian.py
|
1
|
3034
|
## LANSKORPIAN - Personal home network scanner
## Copyright (C) Robert Rice <mindseyes@gmail.com>
## This program is published under a MIT license
print "\n"
print ("-" * 60)
print " LANSKORPIAN Personal Home Port Scanner"
print ("-" * 60)
print "\n"
# Import modules
import subprocess
import ipaddress
from datetime import datetime
import socket
import sys
from com_ports import *
# input a network address
net_addr = raw_input("Enter a network address in CIDR format(ex.192.168.1.0/24): ")
start_time = datetime.now()
# Banner
print ("-" * 60)
print "Please wait, scanning network", net_addr
print ("-" * 60)
# Create the network
ip_net = ipaddress.ip_network(net_addr)
# Get all hosts on that network
all_hosts = list(ip_net.hosts())
# Configure subprocess to hide the console window
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
# For each IP address in the subnet,
# run the ping command with subprocess.popen interface
for i in range(len(all_hosts)):
    output = subprocess.Popen(['ping', '-n', '1', '-w', '100', str(all_hosts[i])], stdout=subprocess.PIPE, startupinfo=info).communicate()[0]
if "Destination host unreachable" in output.decode('utf-8'):
pass
elif "Request timed out" in output.decode('utf-8'):
pass
else:
print(str(all_hosts[i]) + " is Active")
stop_time = datetime.now()
total = stop_time - start_time
print 'Scanning Completed in: ', total
print "\n"
####################################################################
# Start individual host scan
remoteServer = raw_input("Enter Active host to scan: ")
remoteServerIP = socket.gethostbyname(remoteServer)
name = socket.getfqdn(remoteServerIP)
# Banner
print ("-" * 60)
print "Please wait, scanning Active host", name
print ("-" * 60)
# Check what time the scan started
t1 = datetime.now()
# Check and return the service name if available
def get_service(port):
port = str(port)
if port in common_ports:
return common_ports[port]
else:
return unknown_ports
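# Example (assumes com_ports defines e.g. common_ports = {'22': 'SSH'} plus an
# unknown_ports fallback string): get_service(22) -> 'SSH',
# get_service(65000) -> unknown_ports.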
#start scanning ports
try:
for port in range(1,1025):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((remoteServerIP, port))
if result == 0:
print "Port {}: Open".format(port), get_service(port)
sock.close()
except KeyboardInterrupt:
t2 = datetime.now()
cancel = t2 - t1
print "Scan cancled by user in:", cancel
sys.exit()
except socket.gaierror:
print 'Hostname could not be resolved. Exiting'
sys.exit()
except socket.error:
print "Couldn't connect to server"
sys.exit()
# Checking the time again
t3 = datetime.now()
# Calculates the difference of time, to see how long it took to run the script
total = t3 - t1
# Printing the information to screen
print 'Scanning Completed in: ', total
|
mit
| 7,584,437,968,339,130,000
| 24.155172
| 143
| 0.644693
| false
| 3.424379
| false
| false
| false
|
shitolepriya/Saloon_erp
|
erpnext/buying/doctype/supplier/supplier.py
|
1
|
4173
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe import msgprint, _
from frappe.model.naming import make_autoname
from erpnext.utilities.address_and_contact import load_address_and_contact
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.accounts.party import validate_party_accounts
class Supplier(TransactionBase):
def get_feed(self):
return self.supplier_name
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self, "supplier")
def autoname(self):
supp_master_name = frappe.defaults.get_global_default('supp_master_name')
if supp_master_name == 'Supplier Name':
self.name = self.supplier_name
# self.name = self.supplier_name + '-' + self.company
else:
self.name = make_autoname(self.naming_series + '.#####')
# self.name = make_autoname(self.company + '-' + self.naming_series + '.#####')
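		# Example (hypothetical): with supp_master_name set to 'Naming Series'
		# and naming_series 'SUPP-', this yields ids like 'SUPP-00001';
		# otherwise the document id is simply the supplier_name.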
def update_address(self):
frappe.db.sql("""update `tabAddress` set supplier_name=%s, modified=NOW()
where supplier=%s""", (self.supplier_name, self.name))
def update_contact(self):
frappe.db.sql("""update `tabContact` set supplier_name=%s, modified=NOW()
where supplier=%s""", (self.supplier_name, self.name))
def on_update(self):
if not self.naming_series:
self.naming_series = ''
self.update_address()
self.update_contact()
def validate(self):
#validation for Naming Series mandatory field...
if frappe.defaults.get_global_default('supp_master_name') == 'Naming Series':
if not self.naming_series:
msgprint(_("Series is mandatory"), raise_exception=1)
validate_party_accounts(self)
def get_contacts(self,nm):
if nm:
contact_details =frappe.db.convert_to_lists(frappe.db.sql("select name, CONCAT(IFNULL(first_name,''),' ',IFNULL(last_name,'')),contact_no,email_id from `tabContact` where supplier = %s", nm))
return contact_details
else:
return ''
def delete_supplier_address(self):
for rec in frappe.db.sql("select * from `tabAddress` where supplier=%s", (self.name,), as_dict=1):
frappe.db.sql("delete from `tabAddress` where name=%s",(rec['name']))
def delete_supplier_contact(self):
for contact in frappe.db.sql_list("""select name from `tabContact`
where supplier=%s""", self.name):
frappe.delete_doc("Contact", contact)
def on_trash(self):
self.delete_supplier_address()
self.delete_supplier_contact()
def after_rename(self, olddn, newdn, merge=False):
set_field = ''
if frappe.defaults.get_global_default('supp_master_name') == 'Supplier Name':
frappe.db.set(self, "supplier_name", newdn)
self.update_contact()
set_field = ", supplier_name=%(newdn)s"
self.update_supplier_address(newdn, set_field)
def update_supplier_address(self, newdn, set_field):
frappe.db.sql("""update `tabAddress` set address_title=%(newdn)s
{set_field} where supplier=%(newdn)s"""\
.format(set_field=set_field), ({"newdn": newdn}))
@frappe.whitelist()
def get_dashboard_info(supplier):
if not frappe.has_permission("Supplier", "read", supplier):
frappe.throw(_("No permission"))
out = {}
for doctype in ["Supplier Quotation", "Purchase Order", "Purchase Receipt", "Purchase Invoice"]:
out[doctype] = frappe.db.get_value(doctype,
{"supplier": supplier, "docstatus": ["!=", 2] }, "count(*)")
billing_this_year = frappe.db.sql("""
select sum(ifnull(credit_in_account_currency, 0)) - sum(ifnull(debit_in_account_currency, 0))
from `tabGL Entry`
where voucher_type='Purchase Invoice' and party_type = 'Supplier'
and party=%s and fiscal_year = %s""",
(supplier, frappe.db.get_default("fiscal_year")))
total_unpaid = frappe.db.sql("""select sum(outstanding_amount)
from `tabPurchase Invoice`
where supplier=%s and docstatus = 1""", supplier)
out["billing_this_year"] = billing_this_year[0][0] if billing_this_year else 0
out["total_unpaid"] = total_unpaid[0][0] if total_unpaid else 0
out["company_currency"] = frappe.db.sql_list("select distinct default_currency from tabCompany")
return out
|
agpl-3.0
| -6,728,424,563,410,634,000
| 35.929204
| 194
| 0.705967
| false
| 3.207533
| false
| false
| false
|
SigPloiter/SigPloit
|
gtp/attacks/dos/massive_dos.py
|
1
|
5715
|
#!/usr/bin/env python
# encoding: utf-8
# massive_dos.py
#
# Copyright 2018 Rosalia d'Alessandro
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
from optparse import OptionParser
from gtp_v2_core.utilities.configuration_parser import parseConfigs
from commons.message_handler import MessageHandler
from commons.globals import message_queue
__all__ = []
__version__ = 0.1
GTP_PORT = 2123
DEFAULT_MSG_FREQ = 20
DEFAULT_SLEEPTIME = 1
DEBUG = 0
##
## ATTACKING TOOL
##
## @brief Main file to execute the script.
##
## This file can test a DoS attack sending a Delete PDN Connection Set Request
## (101) for a specific FQ-CSID. The FQ-CSID is calculated using the node type id, mcc, mnc and source ip
## provided in the config file.
##
## Use the -h option to enter the help menu and determine what to do.
##
## Basic usage examples:
## * $ python massive_dos.py -v -c conf_file.cnf [-c conf2.cnf ...] -r <remote ip>
# act as a client connecting to <remote-host-ip>
##
## * $ python massive_dos.py -lv -c conf_file.cnf [-c conf2.cnf ...] -r <remote ip>
##
## act as a server listening on 0.0.0.0 and accepting replies from <remote-host-ip>
##
## Example configuration file: MassiveDos.cnf
## Pre-conditions: known valid FQ-CSID
def main(argv=None):
'''Command line options.'''
program_name = os.path.basename(sys.argv[0])
program_version = "v0.1"
program_version_string = '%%prog %s' % (program_version)
program_license = "Copyright 2017 Rosalia d'Alessandro\
Licensed under the Apache License 2.0\
nhttp://www.apache.org/licenses/LICENSE-2.0"
if argv is None:
argv = sys.argv[1:]
lstn = None
try:
# setup option parser
parser = OptionParser(version=program_version_string, description=program_license)
parser.add_option("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %default]")
parser.add_option("-c", "--config", dest="config_file", help="the configuration file")
parser.add_option("-r", "--remote_net", dest="remote_net",
help="remote network e.g. 10.0.0.0/24, 10.0.0.1/32")
parser.add_option("-l", "--listening", dest = "listening_mode",
action = "count", help = "start also a GTP_C listener")
# set defaults
parser.set_defaults(listening_mode=False,
config_file="../config/MassiveDoS.cnf",
verbose = False)
# process options
(opts, args) = parser.parse_args(argv)
is_verbose = False
listening_mode = opts.listening_mode
msg_freq = DEFAULT_SLEEPTIME
remote_net = opts.remote_net
sleep_time = DEFAULT_SLEEPTIME
if listening_mode and remote_net == None:
print "remote network (e.g. 10.0.0.0/24, 10.0.0.1/32) is required"
return
# MAIN BODY #
if opts.config_file == "" :
print "Error: missed config file"
return
config = parseConfigs(opts.config_file)
msgs = config.get_unpacked_messages()
lstn = MessageHandler(messages = msgs, peer = remote_net,
isVerbose = is_verbose,
listening_mode = listening_mode,
msgs_freq = msg_freq, wait_time = sleep_time)
if lstn :
lstn.daemon = True
lstn.start()
lstn.join()
lstn.stop()
print "Sent %d GTPV2 messages"%len(message_queue)
except Exception, e:
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
print "Exception %s"%str(e)
if lstn :
lstn.stop()
return 2
if __name__ == "__main__":
if DEBUG:
sys.argv.append("-v")
sys.exit(main())
|
mit
| -1,503,131,306,233,485,600
| 37.355705
| 124
| 0.607349
| false
| 3.895706
| true
| false
| false
|
bkbilly/AlarmPI
|
alarmcode/logs.py
|
1
|
8304
|
#!/usr/bin/env python
import re
import threading
import time
from datetime import datetime
import pytz
import logging
logging = logging.getLogger('alarmpi')
class Logs():
def __init__(self, wd, logfile, timezone):
self.wd = wd
self.logfile = logfile
try:
self.mytimezone = pytz.timezone(timezone)
except Exception:
logging.exception("Can't find the correct timezone")
self.mytimezone = pytz.utc
self.updateUI = lambda **args: 0
self.limit = 10
self.logtypes = 'all'
def setCallbackUpdateUI(self, callback):
self.updateUI = callback
def setLogFilters(self, limit, logtypes):
""" Sets the global filters for the getSensorsLog method """
self.limit = limit
self.logtypes = logtypes
def writeLog(self, logType, message):
""" Write log events into a file and send the last to UI.
It also uses the timezone from json file to get the local time.
"""
myTimeLog = datetime.now(tz=self.mytimezone)
myTimeLog = myTimeLog.strftime("%Y-%m-%d %H:%M:%S")
logmsg = '({0}) [{1}] {2}\n'.format(logType, myTimeLog, message)
with open(self.logfile, "a") as myfile:
myfile.write(logmsg)
self.updateUI('sensorsLog', self.getSensorsLog(
self.limit, selectTypes=self.logtypes))
def startTrimThread(self, lines=1000):
threadTrimLogFile = threading.Thread(
target=self.trimLogFile,
args=[lines]
)
threadTrimLogFile.daemon = True
threadTrimLogFile.start()
def _convert_timedelta(self, duration):
""" Converts a time difference into human readable format """
days, seconds = duration.days, duration.seconds
hours = days * 24 + seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 60)
diffTxt = ""
if days > 0:
diffTxt = "{days} days, {hours} hour, {minutes} min, {seconds} sec"
elif hours > 0:
diffTxt = "{hours} hour, {minutes} min, {seconds} sec"
elif minutes > 0:
diffTxt = "{minutes} min, {seconds} sec"
else:
diffTxt = "{seconds} sec"
diffTxt = diffTxt.format(
days=days, hours=hours, minutes=minutes, seconds=seconds)
return diffTxt
def trimLogFile(self, lines):
""" Trims the log file in an interval of 24 hours to 1000 lines """
# lines = 1000 # Number of lines of logs to keep
repeat_every_n_sec = 86400 # 24 Hours
        while lines is not None and lines > 0:
with open(self.logfile, 'r') as f:
data = f.readlines()
with open(self.logfile, 'w') as f:
f.writelines(data[-lines:])
time.sleep(repeat_every_n_sec)
def getSensorsLog(self, limit=100, fromText=None,
selectTypes='all', filterText=None,
getFormat='text', combineSensors=True):
""" Returns the last n lines if the log file.
If selectTypes is specified, then it returns only this type of logs.
Available types: user_action, sensor,
system, alarm
If the getFormat is specified as json, then it returns it in a
json format (programmer friendly)
"""
# Fix inputs
if (type(limit) != int and limit is not None):
if (limit.isdigit()):
limit = int(limit)
else:
limit = 100
if (type(selectTypes) == str):
selectTypes = selectTypes.split(',')
elif selectTypes is None:
selectTypes = 'all'.split(',')
if (type(combineSensors) != bool and combineSensors is not None):
if (combineSensors.lower() == 'true'):
combineSensors = True
elif (combineSensors.lower() == 'false'):
combineSensors = False
else:
combineSensors = True
if getFormat is None:
getFormat = 'text'
# Read from File the Logs
logs = []
with open(self.logfile, "r") as f:
lines = f.readlines()
startedSensors = {}
for line in lines:
logType = None
logTime = None
logText = None
# Analyze log line for each category
try:
mymatch = re.match(r'^\((.*)\) \[(.*)\] (.*)', line)
if mymatch:
logType = mymatch.group(1).split(',')
logTime = mymatch.group(2)
logText = mymatch.group(3)
except Exception:
logging.exception("Can't find the correct log group:")
mymatch = re.match(r'^\[(.*)\] (.*)', line)
if mymatch:
logType = ["unknown", "unknown"]
logTime = mymatch.group(1)
logText = mymatch.group(2)
# append them to a list
if logType is not None and logTime is not None and logText is not None:
logs.append({
'type': logType,
'event': logText,
'time': logTime
})
# Add endtime to the sensors
if (combineSensors):
tmplogs = []
index = 0
startedSensors = {}
for log in logs:
if 'sensor' in log['type'][0].lower():
status, uuid = log['type'][1], log['type'][2]
if status == 'on' and uuid not in startedSensors:
startedSensors[uuid] = {
'start': log['time'],
'ind': index
}
index += 1
tmplogs.append(log)
elif status == 'off':
try:
info = startedSensors.pop(uuid, None)
if info is not None:
starttime = datetime.strptime(
info['start'], "%Y-%m-%d %H:%M:%S")
endtime = datetime.strptime(
log['time'], "%Y-%m-%d %H:%M:%S")
timediff = self._convert_timedelta(endtime - starttime)
tmplogs[info['ind']]['timediff'] = timediff
tmplogs[info['ind']]['timeend'] = log['time']
except Exception:
logging.exception("Error combining logs")
logging.error(info)
else:
index += 1
tmplogs.append(log)
logs = tmplogs
# Filter from last found text till the end (e.g. Alarm activated)
if (fromText not in (None, 'all')):
tmplogs = []
index = 0
for log in reversed(logs):
index += 1
if (fromText.lower() in log['event'].lower()):
break
logs = logs[-index:]
# Filter by Types (e.g. sensor, user_action, ...)
if (selectTypes is not None):
if ('all' not in selectTypes):
tmplogs = []
for log in logs:
if (log['type'][0].lower() in selectTypes):
tmplogs.append(log)
logs = tmplogs
# Filter by text (e.g. pir, ...)
if (filterText not in (None, 'all')):
tmplogs = []
for log in logs:
if (filterText.lower() in log['event'].lower()):
tmplogs.append(log)
logs = tmplogs
# Convert to Human format
if (getFormat == 'text'):
tmplogs = []
for log in logs:
if ('timediff' in log):
tmplogs.append('[{0}] ({1}) {2}'.format(log['timeend'], log['timediff'], log['event']))
else:
tmplogs.append('[{0}] {1}'.format(log['time'], log['event']))
logs = tmplogs
return {"log": logs[-limit:]}
|
mit
| 9,113,431,894,522,070,000
| 36.071429
| 107
| 0.486031
| false
| 4.464516
| false
| false
| false
|
weichweich/Pi-Timeswitch
|
Flask-Server/timeswitch/switch/schema.py
|
1
|
2441
|
import logging
import time
from flask import request
from flask_restful import Resource
from marshmallow import ValidationError, post_load, validates_schema
from marshmallow_jsonapi import Schema, fields
from timeswitch.switch.model import (Pin, Sequence, is_absolute_time,
is_relative_time)
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
LOGGER = logging.getLogger(__name__)
class AppError(Exception):
pass
def dasherize(text):
return text.replace('_', '-')
class SequenceSchema(Schema):
id = fields.String(dump_only=True)
start_time = fields.String(required=True)
start_range = fields.String(required=True)
end_time = fields.String(required=True)
end_range = fields.String(required=True)
pin = fields.Relationship(
related_url='/api/pins/{pin_id}',
related_url_kwargs={'pin_id': '<pin>'},
# Include resource linkage
many=False, include_data=True,
type_='pins'
)
@post_load
def make_sequence(self, data):
return Sequence(**data)
def handle_error(self, exc, data):
raise ValidationError(
'An error occurred with input: {0} \n {1}'.format(data, exc.messages))
def __str__(self):
if self.pin is None:
return "<Sequence: Start " + self.start_time + " End " +\
self.end_time + " Pin none>"
else:
return "<Sequence: Start " + self.start_time + " End " +\
self.end_time + " Pin " + str(self.pin) + ">"
class Meta:
type_ = 'sequences'
strict = True
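# Rough usage sketch (marshmallow-jsonapi; hypothetical objects): serialising a
# Sequence with SequenceSchema().dump(sequence) produces a JSON API document of
# type "sequences" whose "pin" relationship links to /api/pins/<pin_id>.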
class PinSchema(Schema):
id = fields.Str(dump_only=True)
number = fields.Integer(required=True)
name = fields.String(attribute='name')
state = fields.Integer()
sequences = fields.Relationship(
related_url='/api/pins/{pin_id}/sequences',
related_url_kwargs={'pin_id': '<id>'},
# Include resource linkage
many=True,
include_data=True,
type_='sequences',
schema='SequenceSchema'
)
@post_load
def make_pin(self, data):
return Pin(**data)
def handle_error(self, exc, data):
raise ValidationError(
'An error occurred with input: {0} \n {1}'.format(data, exc.messages))
class Meta:
type_ = 'pins'
strict = True
|
mit
| 4,909,760,856,457,402,000
| 25.824176
| 82
| 0.60508
| false
| 3.880763
| false
| false
| false
|
cmcqueen/cobs-python
|
python3/cobs/cobs/_cobs_py.py
|
1
|
2890
|
"""
Consistent Overhead Byte Stuffing (COBS)
This version is for Python 3.x.
"""
class DecodeError(Exception):
pass
def _get_buffer_view(in_bytes):
mv = memoryview(in_bytes)
if mv.ndim > 1 or mv.itemsize > 1:
raise BufferError('object must be a single-dimension buffer of bytes.')
try:
mv = mv.cast('c')
except AttributeError:
pass
return mv
def encode(in_bytes):
"""Encode a string using Consistent Overhead Byte Stuffing (COBS).
Input is any byte string. Output is also a byte string.
Encoding guarantees no zero bytes in the output. The output
string will be expanded slightly, by a predictable amount.
An empty string is encoded to '\\x01'"""
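    # A few reference values of standard COBS (for orientation; verify against
    # the spec rather than taking these on faith):
    #   encode(b'')         == b'\x01'
    #   encode(b'\x00')     == b'\x01\x01'
    #   encode(b'1')        == b'\x021'
    #   encode(b'12\x003')  == b'\x0312\x023'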
if isinstance(in_bytes, str):
raise TypeError('Unicode-objects must be encoded as bytes first')
in_bytes_mv = _get_buffer_view(in_bytes)
final_zero = True
out_bytes = bytearray()
idx = 0
search_start_idx = 0
for in_char in in_bytes_mv:
if in_char == b'\x00':
final_zero = True
out_bytes.append(idx - search_start_idx + 1)
out_bytes += in_bytes_mv[search_start_idx:idx]
search_start_idx = idx + 1
else:
if idx - search_start_idx == 0xFD:
final_zero = False
out_bytes.append(0xFF)
out_bytes += in_bytes_mv[search_start_idx:idx+1]
search_start_idx = idx + 1
idx += 1
if idx != search_start_idx or final_zero:
out_bytes.append(idx - search_start_idx + 1)
out_bytes += in_bytes_mv[search_start_idx:idx]
return bytes(out_bytes)
def decode(in_bytes):
"""Decode a string using Consistent Overhead Byte Stuffing (COBS).
Input should be a byte string that has been COBS encoded. Output
is also a byte string.
A cobs.DecodeError exception will be raised if the encoded data
is invalid."""
if isinstance(in_bytes, str):
raise TypeError('Unicode-objects are not supported; byte buffer objects only')
in_bytes_mv = _get_buffer_view(in_bytes)
out_bytes = bytearray()
idx = 0
if len(in_bytes_mv) > 0:
while True:
length = ord(in_bytes_mv[idx])
if length == 0:
raise DecodeError("zero byte found in input")
idx += 1
end = idx + length - 1
copy_mv = in_bytes_mv[idx:end]
if b'\x00' in copy_mv:
raise DecodeError("zero byte found in input")
out_bytes += copy_mv
idx = end
if idx > len(in_bytes_mv):
raise DecodeError("not enough input bytes for length code")
if idx < len(in_bytes_mv):
if length < 0xFF:
out_bytes.append(0)
else:
break
return bytes(out_bytes)
|
mit
| 147,667,922,598,717,340
| 31.111111
| 86
| 0.577855
| false
| 3.832891
| false
| false
| false
|
tengqm/senlin
|
senlin/tests/engine/test_policy_types.py
|
1
|
2469
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_messaging.rpc import dispatcher as rpc
from senlin.common import exception
from senlin.engine import environment
from senlin.engine import service
from senlin.tests.common import base
from senlin.tests.common import utils
from senlin.tests import fakes
class PolicyTypeTest(base.SenlinTestCase):
def setUp(self):
super(PolicyTypeTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='policy_type_test_tenant')
self.eng = service.EngineService('host-a', 'topic-a')
self.eng.init_tgm()
environment.global_env().register_policy('TestPolicy',
fakes.TestPolicy)
def test_policy_type_list(self):
types = self.eng.policy_type_list(self.ctx)
self.assertIsInstance(types, list)
self.assertIn({'name': 'TestPolicy'}, types)
self.assertNotIn({'name': 'some-weird-stuff'}, types)
def test_policy_type_schema(self):
type_name = 'TestPolicy'
expected = {
'spec': {
'KEY1': {
'type': 'String',
                    'required': False,
'description': 'key1',
'default': 'default1',
},
'KEY2': {
'type': 'Integer',
'required': False,
'description': 'key2',
'default': 1,
},
}
}
schema = self.eng.policy_type_schema(self.ctx, type_name=type_name)
self.assertEqual(expected, schema)
def test_policy_type_schema_nonexist(self):
ex = self.assertRaises(rpc.ExpectedException,
self.eng.policy_type_schema,
self.ctx, type_name='Bogus')
self.assertEqual(exception.PolicyTypeNotFound, ex.exc_info[0])
|
apache-2.0
| 4,855,372,240,069,778,000
| 36.409091
| 75
| 0.592143
| false
| 4.256897
| true
| false
| false
|
gregdetre/abracadjabra
|
abracadjabra/views.py
|
1
|
3115
|
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.db.models import Sum, Count
from django.http import Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from exceptions import SlugAttributeError
from models import Experiment, ExperimentUser
from utils.dt import dt_ranges, recent_day, recent_week
@staff_member_required
def experiments_vw(request):
active_experiments = Experiment.active.all()
inactive_experiments = Experiment.inactive.all()
analyses = [] # Analysis.get_all_analyses()
nAnalyses = len(analyses)
return render_to_response('abracadjabra/experiments.html',
{'active_experiments': active_experiments,
'inactive_experiments': inactive_experiments,
'analyses': analyses,
'nExperiments': Experiment.objects.count(),
'nExperimentsActive': active_experiments.count(),
'nExperimentsInactive': inactive_experiments.count(),
'nAnalyses': nAnalyses,},
context_instance=RequestContext(request))
@staff_member_required
def experiment_detail_vw(request, experiment_id):
dt_joined_str = request.GET.get('dt_joined', 'recent_week')
dt_joined = dt_ranges[dt_joined_str][0] # e.g. recent_week()
# use .objects to allow inactive Experiments to still be viewable
expt = get_object_or_404(Experiment, id=experiment_id)
buckets, dt_joined = expt.compute_buckets(dt_joined=dt_joined)
last_exptuser = ExperimentUser.get_latest(expt)
return render_to_response('abracadjabra/experiment_detail.html',
{'expt': expt,
'buckets': buckets,
'dt_joined': dt_joined,
'last_ran': last_exptuser.cre,},
context_instance=RequestContext(request))
@staff_member_required
def analysis_detail_vw(request, analysis_slug):
dt_joined_str = request.GET.get('dt_joined', 'recent_week')
dt_joined = dt_ranges[dt_joined_str][0] # e.g. recent_week()
try:
analysis = Analysis(analysis_slug, dt_joined)
# and send it by email 60s later, in case this times out
# send_analysis_mail.apply_async(args=[analysis_slug, analysis.dt_joined],
# countdown=60)
analysis.run()
except SlugAttributeError:
raise Http404
# for some reason, some of these variables are outside the EXPT scope in experiment_detail.html
context = {'expt': analysis.as_dict(),
'dt_joined': analysis.dt_joined,
'last_ran': None,
'buckets': analysis.buckets,}
return render_to_response('abracadjabra/analysis_detail.html',
context,
context_instance=RequestContext(request))
|
mit
| -9,047,564,593,219,679,000
| 44.808824
| 99
| 0.614446
| false
| 4.215156
| false
| false
| false
|
jgravois/ArcREST
|
src/arcrest/agol/layer.py
|
1
|
54466
|
"""
.. module:: layer
:platform: Windows, Linux
   :synopsis: Class that contains feature service layer information.
.. moduleauthor:: Esri
"""
from .._abstract import abstract
from ..security import security
import types
from ..common import filters
from ..common.geometry import SpatialReference
from ..common.general import _date_handler, _unicode_convert, Feature
from ..common.spatial import scratchFolder, scratchGDB, json_to_featureclass
from ..common.spatial import get_OID_field, get_records_with_attachments
from ..common.spatial import create_feature_layer, merge_feature_class
from ..common.spatial import featureclass_to_json, create_feature_class
from ..common.spatial import get_attachment_data
from ..common.general import FeatureSet
from ..hostedservice import AdminFeatureServiceLayer
import featureservice
import os
import json
import math
import urlparse
import mimetypes
import uuid
from re import search
from urlparse import urlparse
########################################################################
class FeatureLayer(abstract.BaseAGOLClass):
"""
This contains information about a feature service's layer.
"""
_objectIdField = None
_allowGeometryUpdates = None
_globalIdField = None
_token_url = None
_currentVersion = None
_id = None
_name = None
_type = None
_description = None
_definitionExpression = None
_geometryType = None
_hasZ = None
_hasM = None
_copyrightText = None
_parentLayer = None
_subLayers = None
_minScale = None
_maxScale = None
_effectiveMinScale = None
_effectiveMaxScale = None
_defaultVisibility = None
_extent = None
_timeInfo = None
_drawingInfo = None
_hasAttachments = None
_htmlPopupType = None
_displayField = None
_typeIdField = None
_fields = None
_types = None # sub-types
_relationships = None
_maxRecordCount = None
_canModifyLayer = None
_supportsValidateSql = None
_supportsCoordinatesQuantization = None
_supportsStatistics = None
_supportsAdvancedQueries = None
_hasLabels = None
_canScaleSymbols = None
_capabilities = None
_supportedQueryFormats = None
_isDataVersioned = None
_ownershipBasedAccessControlForFeatures = None
_useStandardizedQueries = None
_templates = None
_indexes = None
_hasStaticData = None
_supportsRollbackOnFailureParameter = None
_advancedQueryCapabilities = None
_editingInfo = None
_proxy_url = None
_proxy_port = None
_securityHandler = None
_supportsCalculate = None
_supportsAttachmentsByUploadId = None
_editFieldsInfo = None
_serverURL = None
_supportsValidateSql = None
_supportsCoordinatesQuantization = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
initialize=False,
proxy_url=None,
proxy_port=None):
"""Constructor"""
self._url = url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if securityHandler is not None and \
isinstance(securityHandler, abstract.BaseSecurityHandler):
self._securityHandler = securityHandler
if not securityHandler.referer_url is None:
self._referer_url = securityHandler.referer_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the service """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implemented in Feature Layer."
self._parentLayer = featureservice.FeatureService(
url=os.path.dirname(self._url),
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def refresh(self):
"""refreshes all the properties of the service"""
self.__init()
#----------------------------------------------------------------------
def __str__(self):
""" returns object as string """
return json.dumps(dict(self), default=_date_handler)
#----------------------------------------------------------------------
def __iter__(self):
""" iterator generator for public values/properties
It only returns the properties that are public.
"""
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_') and \
not isinstance(getattr(self, attr), (types.MethodType,
types.BuiltinFunctionType,
types.BuiltinMethodType))
]
for att in attributes:
yield (att, getattr(self, att))
#----------------------------------------------------------------------
@property
def url(self):
""" returns the url for the feature layer"""
return self._url
#----------------------------------------------------------------------
@property
def administration(self):
"""returns the hostservice object to manage the back-end functions"""
url = self._url
res = search("/rest/", url).span()
addText = "admin/"
part1 = url[:res[1]]
part2 = url[res[1]:]
adminURL = "%s%s%s" % (part1, addText, part2)
res = AdminFeatureServiceLayer(url=adminURL,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
return res
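    # Note (editorial, restating the code above): the admin URL is built by
    # injecting "admin/" directly after the first "/rest/" segment, so a
    # hypothetical layer URL such as
    #   https://host/arcgis/rest/services/Parcels/FeatureServer/0
    # would map to
    #   https://host/arcgis/rest/admin/services/Parcels/FeatureServer/0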
#----------------------------------------------------------------------
@property
def supportsValidateSql(self):
""" returns the supports calculate values """
if self._supportsValidateSql is None:
self.__init()
return self._supportsValidateSql
#----------------------------------------------------------------------
@property
def supportsCoordinatesQuantization(self):
""" returns the supports calculate values """
if self._supportsCoordinatesQuantization is None:
self.__init()
return self._supportsCoordinatesQuantization
#----------------------------------------------------------------------
@property
def supportsCalculate(self):
""" returns the supports calculate values """
if self._supportsCalculate is None:
self.__init()
return self._supportsCalculate
#----------------------------------------------------------------------
@property
def editFieldsInfo(self):
""" returns edit field info """
if self._editFieldsInfo is None:
self.__init()
return self._editFieldsInfo
#----------------------------------------------------------------------
@property
def supportsAttachmentsByUploadId(self):
""" returns is supports attachments by uploads id """
if self._supportsAttachmentsByUploadId is None:
self.__init()
return self._supportsAttachmentsByUploadId
#----------------------------------------------------------------------
@property
def editingInfo(self):
""" returns the edit information """
if self._editingInfo is None:
self.__init()
return self._editingInfo
#----------------------------------------------------------------------
@property
def advancedQueryCapabilities(self):
""" returns the advanced query capabilities """
if self._advancedQueryCapabilities is None:
self.__init()
return self._advancedQueryCapabilities
#----------------------------------------------------------------------
@property
def supportsRollbackOnFailureParameter(self):
""" returns if rollback on failure supported """
if self._supportsRollbackOnFailureParameter is None:
self.__init()
return self._supportsRollbackOnFailureParameter
#----------------------------------------------------------------------
@property
def hasStaticData(self):
"""boolean T/F if static data is present """
if self._hasStaticData is None:
self.__init()
return self._hasStaticData
#----------------------------------------------------------------------
@property
def indexes(self):
"""gets the indexes"""
if self._indexes is None:
self.__init()
return self._indexes
#----------------------------------------------------------------------
@property
def templates(self):
""" gets the template """
if self._templates is None:
self.__init()
return self._templates
#----------------------------------------------------------------------
@property
def allowGeometryUpdates(self):
""" returns boolean if geometry updates are allowed """
if self._allowGeometryUpdates is None:
self.__init()
return self._allowGeometryUpdates
#----------------------------------------------------------------------
@property
def globalIdField(self):
""" returns the global id field """
if self._globalIdField is None:
self.__init()
return self._globalIdField
#----------------------------------------------------------------------
@property
def objectIdField(self):
if self._objectIdField is None:
self.__init()
return self._objectIdField
#----------------------------------------------------------------------
@property
def currentVersion(self):
""" returns the current version """
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def id(self):
""" returns the id """
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def name(self):
""" returns the name """
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def type(self):
""" returns the type """
if self._type is None:
self.__init()
return self._type
#----------------------------------------------------------------------
@property
def description(self):
""" returns the layer's description """
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def definitionExpression(self):
"""returns the definitionExpression"""
if self._definitionExpression is None:
self.__init()
return self._definitionExpression
#----------------------------------------------------------------------
@property
def geometryType(self):
"""returns the geometry type"""
if self._geometryType is None:
self.__init()
return self._geometryType
#----------------------------------------------------------------------
@property
def hasZ(self):
""" returns if it has a Z value or not """
if self._hasZ is None:
self.__init()
return self._hasZ
#----------------------------------------------------------------------
@property
def hasM(self):
""" returns if it has a m value or not """
if self._hasM is None:
self.__init()
return self._hasM
#----------------------------------------------------------------------
@property
def copyrightText(self):
""" returns the copyright text """
if self._copyrightText is None:
self.__init()
return self._copyrightText
#----------------------------------------------------------------------
@property
def parentLayer(self):
""" returns information about the parent """
if self._parentLayer is None:
self.__init()
return self._parentLayer
#----------------------------------------------------------------------
@property
def subLayers(self):
""" returns sublayers for layer """
if self._subLayers is None:
self.__init()
return self._subLayers
#----------------------------------------------------------------------
@property
def minScale(self):
""" minimum scale layer will show """
if self._minScale is None:
self.__init()
return self._minScale
@property
def maxScale(self):
""" sets the max scale """
if self._maxScale is None:
self.__init()
return self._maxScale
@property
def effectiveMinScale(self):
""" returns the effective minimum scale value """
if self._effectiveMinScale is None:
self.__init()
return self._effectiveMinScale
@property
def effectiveMaxScale(self):
""" returns the effective maximum scale value """
if self._effectiveMaxScale is None:
self.__init()
return self._effectiveMaxScale
@property
def defaultVisibility(self):
""" returns the default visibility of the layer """
if self._defaultVisibility is None:
self.__init()
return self._defaultVisibility
@property
def extent(self):
""" returns the extent """
if self._extent is None:
self.__init()
return self._extent
@property
def timeInfo(self):
""" returns the time information about the layer """
if self._timeInfo is None:
self.__init()
return self._timeInfo
@property
def drawingInfo(self):
""" returns the symbol information about the layer """
if self._drawingInfo is None:
self.__init()
return self._drawingInfo
@property
def hasAttachments(self):
""" boolean that tells if attachments are associated with layer """
if self._hasAttachments is None:
self.__init()
return self._hasAttachments
@property
def htmlPopupType(self):
""" returns the popup type """
if self._htmlPopupType is None:
self.__init()
return self._htmlPopupType
@property
def displayField(self):
""" returns the primary display field """
if self._displayField is None:
self.__init()
return self._displayField
@property
def typeIdField(self):
""" returns the type Id field """
if self._typeIdField is None:
self.__init()
return self._typeIdField
@property
def fields(self):
""" returns the layer's fields """
if self._fields is None:
self.__init()
return self._fields
@property
def types(self):
""" returns the types """
if self._types is None:
self.__init()
return self._types
@property
def relationships(self):
""" returns the relationships for the layer """
if self._relationships is None:
self.__init()
return self._relationships
@property
def maxRecordCount(self):
""" returns the maximum returned records """
if self._maxRecordCount is None:
self.__init()
if self._maxRecordCount is None:
self._maxRecordCount = 1000
return self._maxRecordCount
@property
def canModifyLayer(self):
""" returns boolean to say if layer can be modified """
if self._canModifyLayer is None:
self.__init()
return self._canModifyLayer
@property
def supportsStatistics(self):
""" boolean to if supports statistics """
if self._supportsStatistics is None:
self.__init()
return self._supportsStatistics
@property
def supportsAdvancedQueries(self):
""" boolean value if advanced queries is supported """
if self._supportsAdvancedQueries is None:
self.__init()
return self._supportsAdvancedQueries
@property
def hasLabels(self):
""" returns if layer has labels on or not """
if self._hasLabels is None:
self.__init()
return self._hasLabels
@property
def canScaleSymbols(self):
""" states if symbols can scale """
if self._canScaleSymbols is None:
self.__init()
return self._canScaleSymbols
@property
def capabilities(self):
""" operations that can be performed on layer """
if self._capabilities is None:
self.__init()
return self._capabilities
@property
def supportedQueryFormats(self):
""" returns supported query formats """
if self._supportedQueryFormats is None:
self.__init()
return self._supportedQueryFormats
@property
def isDataVersioned(self):
""" returns boolean if data is in version control """
if self._isDataVersioned is None:
self.__init()
return self._isDataVersioned
@property
def ownershipBasedAccessControlForFeatures(self):
""" returns value for owernship based access control """
if self._ownershipBasedAccessControlForFeatures is None:
self.__init()
return self._ownershipBasedAccessControlForFeatures
@property
def useStandardizedQueries(self):
""" returns value if standardized queries can be used """
if self._useStandardizedQueries is None:
self.__init()
return self._useStandardizedQueries
#----------------------------------------------------------------------
@property
def securityHandler(self):
""" gets the security handler """
return self._securityHandler
#----------------------------------------------------------------------
@securityHandler.setter
def securityHandler(self, value):
""" sets the security handler """
if isinstance(value, abstract.BaseSecurityHandler):
if isinstance(value, security.AGOLTokenSecurityHandler):
self._securityHandler = value
self._token = value.token
self._username = value.username
self._password = value._password
self._token_url = value.token_url
elif isinstance(value, security.OAuthSecurityHandler):
self._token = value.token
self._securityHandler = value
else:
pass
#----------------------------------------------------------------------
def addAttachment(self, oid, file_path):
""" Adds an attachment to a feature service
Input:
oid - string - OBJECTID value to add attachment to
file_path - string - path to file
Output:
                JSON Response
"""
if self.hasAttachments == True:
attachURL = self._url + "/%s/addAttachment" % oid
params = {'f':'json'}
parsed = urlparse(attachURL)
files = []
files.append(('attachment', file_path, os.path.basename(file_path)))
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
securityHandler=self._securityHandler,
files=files,
fields=params,
port=parsed.port,
ssl=parsed.scheme.lower() == 'https',
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return self._unicode_convert(res)
else:
return "Attachments are not supported for this feature service."
#----------------------------------------------------------------------
def deleteAttachment(self, oid, attachment_id):
""" removes an attachment from a feature service feature
Input:
oid - integer or string - id of feature
attachment_id - integer - id of attachment to erase
Output:
JSON response
"""
url = self._url + "/%s/deleteAttachments" % oid
params = {
"f":"json",
"attachmentIds" : "%s" % attachment_id
}
return self._do_post(url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateAttachment(self, oid, attachment_id, file_path):
""" updates an existing attachment with a new file
Inputs:
oid - string/integer - Unique record ID
attachment_id - integer - Unique attachment identifier
file_path - string - path to new attachment
Output:
JSON response
"""
url = self._url + "/%s/updateAttachment" % oid
params = {
"f":"json",
"attachmentId" : "%s" % attachment_id
}
parsed = urlparse(url)
port = parsed.port
files = []
files.append(('attachment', file_path, os.path.basename(file_path)))
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
files=files,
port=port,
fields=params,
securityHandler=self._securityHandler,
ssl=parsed.scheme.lower() == 'https',
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return self._unicode_convert(res)
#----------------------------------------------------------------------
def listAttachments(self, oid):
""" list attachements for a given OBJECT ID """
url = self._url + "/%s/attachments" % oid
params = {
"f":"json"
}
return self._do_get(url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def create_fc_template(self, out_path, out_name):
"""creates a featureclass template on local disk"""
fields = self.fields
objectIdField = self.objectIdField
geomType = self.geometryType
wkid = self.parentLayer.spatialReference['wkid']
return create_feature_class(out_path,
out_name,
geomType,
wkid,
fields,
objectIdField)
def create_feature_template(self):
"""creates a feature template"""
fields = self.fields
feat_schema = {}
att = {}
for fld in fields:
self._globalIdField
if not fld['name'] == self._objectIdField and not fld['name'] == self._globalIdField:
att[fld['name']] = ''
feat_schema['attributes'] = att
feat_schema['geometry'] = ''
return Feature(feat_schema)
#----------------------------------------------------------------------
def query(self,
where="1=1",
out_fields="*",
timeFilter=None,
geometryFilter=None,
returnGeometry=True,
returnIDsOnly=False,
returnCountOnly=False,
returnFeatureClass=False,
out_fc=None):
""" queries a feature service based on a sql statement
Inputs:
where - the selection sql statement
out_fields - the attribute fields to return
timeFilter - a TimeFilter object where either the start time
or start and end time are defined to limit the
search results for a given time. The values in
                         the timeFilter should be as UTC timestamps in
milliseconds. No checking occurs to see if they
are in the right format.
geometryFilter - a GeometryFilter object to parse down a given
query by another spatial dataset.
returnGeometry - true means a geometry will be returned,
else just the attributes
returnIDsOnly - false is default. True means only OBJECTIDs
will be returned
returnCountOnly - if True, then an integer is returned only
based on the sql statement
returnFeatureClass - Default False. If true, query will be
returned as feature class
out_fc - only valid if returnFeatureClass is set to True.
Output location of query.
Output:
               A FeatureSet of Feature objects (default) or a path to the output featureclass if
returnFeatureClass is set to True.
"""
params = {"f": "json",
"where": where,
"outFields": out_fields,
"returnGeometry" : returnGeometry,
"returnIdsOnly" : returnIDsOnly,
"returnCountOnly" : returnCountOnly,
}
if not timeFilter is None and \
isinstance(timeFilter, filters.TimeFilter):
params['time'] = timeFilter.filter
if not geometryFilter is None and \
isinstance(geometryFilter, filters.GeometryFilter):
gf = geometryFilter.filter
params['geometry'] = gf['geometry']
params['geometryType'] = gf['geometryType']
params['spatialRelationship'] = gf['spatialRel']
params['inSR'] = gf['inSR']
fURL = self._url + "/query"
results = self._do_get(fURL, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if 'error' in results:
raise ValueError (results)
if not returnCountOnly and not returnIDsOnly:
if returnFeatureClass:
json_text = json.dumps(results)
temp = scratchFolder() + os.sep + uuid.uuid4().get_hex() + ".json"
with open(temp, 'wb') as writer:
writer.write(json_text)
writer.flush()
del writer
fc = json_to_featureclass(json_file=temp,
out_fc=out_fc)
os.remove(temp)
return fc
else:
return FeatureSet.fromJSON(json.dumps(results))
else:
return results
return
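    # Usage sketch (illustrative; the URL and field names below are hypothetical,
    # not taken from this module):
    #   fl = FeatureLayer(url="https://.../FeatureServer/0", securityHandler=sh)
    #   res = fl.query(where="POP2000 > 100000", returnCountOnly=True)  # raw JSON with the count
    #   fs = fl.query(where="1=1", out_fields="OBJECTID,NAME")          # FeatureSet of features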
#----------------------------------------------------------------------
def query_related_records(self,
objectIds,
relationshipId,
outFields="*",
definitionExpression=None,
returnGeometry=True,
maxAllowableOffset=None,
geometryPrecision=None,
outWKID=None,
gdbVersion=None,
returnZ=False,
returnM=False):
"""
The Query operation is performed on a feature service layer
resource. The result of this operation are feature sets grouped
by source layer/table object IDs. Each feature set contains
Feature objects including the values for the fields requested by
the user. For related layers, if you request geometry
information, the geometry of each feature is also returned in
the feature set. For related tables, the feature set does not
include geometries.
Inputs:
objectIds - the object IDs of the table/layer to be queried
relationshipId - The ID of the relationship to be queried.
outFields - the list of fields from the related table/layer
to be included in the returned feature set. This
list is a comma delimited list of field names. If
you specify the shape field in the list of return
fields, it is ignored. To request geometry, set
returnGeometry to true.
You can also specify the wildcard "*" as the
                        value of this parameter. In this case, the
                        results will include all the field values.
definitionExpression - The definition expression to be
applied to the related table/layer.
From the list of objectIds, only those
records that conform to this
expression are queried for related
records.
returnGeometry - If true, the feature set includes the
geometry associated with each feature. The
default is true.
maxAllowableOffset - This option can be used to specify the
maxAllowableOffset to be used for
generalizing geometries returned by the
query operation. The maxAllowableOffset
is in the units of the outSR. If outSR
is not specified, then
maxAllowableOffset is assumed to be in
the unit of the spatial reference of the
map.
geometryPrecision - This option can be used to specify the
number of decimal places in the response
geometries.
outWKID - The spatial reference of the returned geometry.
gdbVersion - The geodatabase version to query. This parameter
applies only if the isDataVersioned property of
the layer queried is true.
returnZ - If true, Z values are included in the results if
the features have Z values. Otherwise, Z values are
not returned. The default is false.
returnM - If true, M values are included in the results if
the features have M values. Otherwise, M values are
not returned. The default is false.
"""
params = {
"f" : "json",
"objectIds" : objectIds,
"relationshipId" : relationshipId,
"outFields" : outFields,
"returnGeometry" : returnGeometry,
"returnM" : returnM,
"returnZ" : returnZ
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if definitionExpression is not None:
params['definitionExpression'] = definitionExpression
if outWKID is not None:
params['outSR'] = SpatialReference(outWKID).asDictionary
if maxAllowableOffset is not None:
params['maxAllowableOffset'] = maxAllowableOffset
if geometryPrecision is not None:
params['geometryPrecision'] = geometryPrecision
quURL = self._url + "/queryRelatedRecords"
res = self._do_get(url=quURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return res
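    # Usage sketch (illustrative ids; the relationshipId must exist on the service):
    #   related = fl.query_related_records(objectIds="1,2,3", relationshipId=0,
    #                                      outFields="*", returnGeometry=False)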
#----------------------------------------------------------------------
def getHTMLPopup(self, oid):
"""
The htmlPopup resource provides details about the HTML pop-up
authored by the user using ArcGIS for Desktop.
Input:
oid - object id of the feature where the HTML pop-up
Output:
"""
if self.htmlPopupType != "esriServerHTMLPopupTypeNone":
popURL = self._url + "/%s/htmlPopup" % oid
params = {
'f' : "json"
}
return self._do_get(url=popURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return ""
#----------------------------------------------------------------------
def _chunks(self, l, n):
""" Yield n successive chunks from a list l.
"""
l.sort()
newn = int(1.0 * len(l) / n + 0.5)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
#----------------------------------------------------------------------
def get_local_copy(self, out_path, includeAttachments=False):
""" exports the whole feature service to a feature class
Input:
out_path - path to where the data will be placed
includeAttachments - default False. If sync is not supported
                                 then the parameter is ignored.
Output:
path to exported feature class or fgdb (as list)
"""
if self.hasAttachments and \
self.parentLayer.syncEnabled:
return self.parentLayer.createReplica(replicaName="fgdb_dump",
layers="%s" % self.id,
returnAsFeatureClass=True,
returnAttachments=includeAttachments,
out_path=out_path)[0]
elif self.hasAttachments == False and \
self.parentLayer.syncEnabled:
return self.parentLayer.createReplica(replicaName="fgdb_dump",
layers="%s" % self.id,
returnAsFeatureClass=True,
out_path=out_path)[0]
else:
result_features = []
res = self.query(returnIDsOnly=True)
OIDS = res['objectIds']
OIDS.sort()
OIDField = res['objectIdFieldName']
count = len(OIDS)
if count <= self.maxRecordCount:
bins = 1
else:
bins = count / self.maxRecordCount
v = count % self.maxRecordCount
if v > 0:
bins += 1
chunks = self._chunks(OIDS, bins)
for chunk in chunks:
chunk.sort()
sql = "%s >= %s and %s <= %s" % (OIDField, chunk[0],
OIDField, chunk[len(chunk) -1])
temp_base = "a" + uuid.uuid4().get_hex()[:6] + "a"
temp_fc = r"%s\%s" % (scratchGDB(), temp_base)
temp_fc = self.query(where=sql,
returnFeatureClass=True,
out_fc=temp_fc)
result_features.append(temp_fc)
return merge_feature_class(merges=result_features,
out_fc=out_path)
#----------------------------------------------------------------------
def updateFeature(self,
features,
gdbVersion=None,
rollbackOnFailure=True):
"""
updates an existing feature in a feature service layer
Input:
feature - feature object(s) to get updated. A single
feature, a list of feature objects can be passed,
or a FeatureSet object.
Output:
dictionary of result messages
"""
params = {
"f" : "json",
"rollbackOnFailure" : rollbackOnFailure
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if isinstance(features, Feature):
params['features'] = json.dumps([features.asDictionary],
default=_date_handler
)
elif isinstance(features, list):
vals = []
for feature in features:
if isinstance(feature, Feature):
vals.append(feature.asDictionary)
params['features'] = json.dumps(vals,
default=_date_handler
)
elif isinstance(features, FeatureSet):
params['features'] = json.dumps(
[feature.asDictionary for feature in features.features],
default=_date_handler
)
else:
return {'message' : "invalid inputs"}
updateURL = self._url + "/updateFeatures"
res = self._do_post(url=updateURL,
securityHandler=self._securityHandler,
param_dict=params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return res
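    # Usage sketch (my_feature is a hypothetical common.Feature carrying the
    # edited attribute values):
    #   res = fl.updateFeature(features=my_feature)        # single Feature
    #   res = fl.updateFeature(features=[feat_a, feat_b])  # list of Features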
#----------------------------------------------------------------------
def deleteFeatures(self,
objectIds="",
where="",
geometryFilter=None,
gdbVersion=None,
rollbackOnFailure=True
):
""" removes 1:n features based on a sql statement
Input:
objectIds - The object IDs of this layer/table to be deleted
where - A where clause for the query filter. Any legal SQL
where clause operating on the fields in the layer is
allowed. Features conforming to the specified where
clause will be deleted.
geometryFilter - a filters.GeometryFilter object to limit
deletion by a geometry.
gdbVersion - Geodatabase version to apply the edits. This
parameter applies only if the isDataVersioned
property of the layer is true
rollbackOnFailure - parameter to specify if the edits should
be applied only if all submitted edits
succeed. If false, the server will apply
the edits that succeed even if some of
the submitted edits fail. If true, the
server will apply the edits only if all
edits succeed. The default value is true.
Output:
JSON response as dictionary
"""
dURL = self._url + "/deleteFeatures"
params = {
"f": "json",
}
if geometryFilter is not None and \
isinstance(geometryFilter, filters.GeometryFilter):
gfilter = geometryFilter.filter
params['geometry'] = gfilter['geometry']
params['geometryType'] = gfilter['geometryType']
params['inSR'] = gfilter['inSR']
params['spatialRel'] = gfilter['spatialRel']
if where is not None and \
where != "":
params['where'] = where
if objectIds is not None and \
objectIds != "":
params['objectIds'] = objectIds
result = self._do_post(url=dURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return result
#----------------------------------------------------------------------
def applyEdits(self,
addFeatures=[],
updateFeatures=[],
deleteFeatures=None,
gdbVersion=None,
rollbackOnFailure=True):
"""
This operation adds, updates, and deletes features to the
associated feature layer or table in a single call.
Inputs:
addFeatures - The array of features to be added. These
features should be common.Feature objects
            updateFeatures - The array of features to be updated.
These features should be common.Feature
objects
deleteFeatures - string of OIDs to remove from service
gdbVersion - Geodatabase version to apply the edits.
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
dictionary of messages
"""
editURL = self._url + "/applyEdits"
params = {"f": "json"
}
if len(addFeatures) > 0 and \
isinstance(addFeatures[0], Feature):
params['adds'] = json.dumps([f.asDictionary for f in addFeatures],
default=_date_handler)
if len(updateFeatures) > 0 and \
isinstance(updateFeatures[0], Feature):
params['updates'] = json.dumps([f.asDictionary for f in updateFeatures],
default=_date_handler)
if deleteFeatures is not None and \
isinstance(deleteFeatures, str):
params['deletes'] = deleteFeatures
return self._do_post(url=editURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
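    # Usage sketch (feat_add / feat_upd are hypothetical common.Feature objects;
    # deletes are passed as a comma separated OID string):
    #   result = fl.applyEdits(addFeatures=[feat_add],
    #                          updateFeatures=[feat_upd],
    #                          deleteFeatures="10,11")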
#----------------------------------------------------------------------
def addFeature(self, features,
gdbVersion=None,
rollbackOnFailure=True):
""" Adds a single feature to the service
Inputs:
feature - list of common.Feature object or a single
common.Feature Object or a FeatureSet object
gdbVersion - Geodatabase version to apply the edits
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
JSON message as dictionary
"""
url = self._url + "/addFeatures"
params = {
"f" : "json"
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if isinstance(rollbackOnFailure, bool):
params['rollbackOnFailure'] = rollbackOnFailure
if isinstance(features, list):
params['features'] = json.dumps([feature.asDictionary for feature in features],
default=_date_handler)
elif isinstance(features, Feature):
params['features'] = json.dumps([features.asDictionary],
default=_date_handler)
elif isinstance(features, FeatureSet):
            params['features'] = json.dumps([feature.asDictionary for feature in features.features],
default=_date_handler)
else:
return None
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def addFeatures(self, fc, attachmentTable=None,
nameField="ATT_NAME", blobField="DATA",
contentTypeField="CONTENT_TYPE",
rel_object_field="REL_OBJECTID"):
""" adds a feature to the feature service
Inputs:
fc - string - path to feature class data to add.
attachmentTable - string - (optional) path to attachment table
nameField - string - (optional) name of file field in attachment table
blobField - string - (optional) name field containing blob data
contentTypeField - string - (optional) name of field containing content type
rel_object_field - string - (optional) name of field with OID of feature class
Output:
               dictionary of add results messages (plus attachment results when an attachment table is supplied)
"""
messages = {'addResults':[]}
if attachmentTable is None:
count = 0
bins = 1
uURL = self._url + "/addFeatures"
max_chunk = 250
js = json.loads(self._unicode_convert(
featureclass_to_json(fc)))
js = js['features']
if len(js) == 0:
return {'addResults':None}
if len(js) <= max_chunk:
bins = 1
else:
bins = int(len(js)/max_chunk)
if len(js) % max_chunk > 0:
bins += 1
chunks = self._chunks(l=js, n=bins)
for chunk in chunks:
params = {
"f" : 'json',
"features" : json.dumps(chunk,
default=self._date_handler)
}
result = self._do_post(url=uURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if messages is None:
messages = result
else:
if 'addResults' in result:
if 'addResults' in messages:
messages['addResults'] = messages['addResults'] + result['addResults']
else:
messages['addResults'] = result['addResults']
else:
messages['errors'] = result
del params
del result
return messages
else:
oid_field = get_OID_field(fc)
OIDs = get_records_with_attachments(attachment_table=attachmentTable)
fl = create_feature_layer(fc, "%s not in ( %s )" % (oid_field, ",".join(OIDs)))
result = self.addFeatures(fl)
if result is not None:
messages.update(result)
del fl
for oid in OIDs:
fl = create_feature_layer(fc, "%s = %s" % (oid_field, oid), name="layer%s" % oid)
msgs = self.addFeatures(fl)
for result in msgs['addResults']:
oid_fs = result['objectId']
sends = get_attachment_data(attachmentTable, sql="%s = %s" % (rel_object_field, oid))
result['addAttachmentResults'] = []
for s in sends:
attRes = self.addAttachment(oid_fs, s['blob'])
if 'addAttachmentResult' in attRes:
attRes['addAttachmentResult']['AttachmentName'] = s['name']
result['addAttachmentResults'].append(attRes['addAttachmentResult'])
else:
attRes['AttachmentName'] = s['name']
result['addAttachmentResults'].append(attRes)
del s
del sends
del result
messages.update( msgs)
del fl
del oid
del OIDs
return messages
#----------------------------------------------------------------------
def calculate(self, where, calcExpression, sqlFormat="standard"):
"""
The calculate operation is performed on a feature service layer
resource. It updates the values of one or more fields in an
existing feature service layer based on SQL expressions or scalar
values. The calculate operation can only be used if the
supportsCalculate property of the layer is true.
Neither the Shape field nor system fields can be updated using
calculate. System fields include ObjectId and GlobalId.
See Calculate a field for more information on supported expressions
Inputs:
where - A where clause can be used to limit the updated records.
Any legal SQL where clause operating on the fields in
the layer is allowed.
calcExpression - The array of field/value info objects that
contain the field or fields to update and their
scalar values or SQL expression. Allowed types
are dictionary and list. List must be a list
of dictionary objects.
Calculation Format is as follows:
{"field" : "<field name>",
"value" : "<value>"}
sqlFormat - The SQL format for the calcExpression. It can be
either standard SQL92 (standard) or native SQL
(native). The default is standard.
Values: standard, native
Output:
JSON as string
Usage:
>>>sh = arcrest.AGOLTokenSecurityHandler("user", "pw")
>>>fl = arcrest.agol.FeatureLayer(url="someurl",
securityHandler=sh, initialize=True)
>>>print fl.calculate(where="OBJECTID < 2",
calcExpression={"field": "ZONE",
"value" : "R1"})
{'updatedFeatureCount': 1, 'success': True}
"""
url = self._url + "/calculate"
params = {
"f" : "json",
"where" : where,
}
if isinstance(calcExpression, dict):
params["calcExpression"] = json.dumps([calcExpression],
default=_date_handler)
elif isinstance(calcExpression, list):
params["calcExpression"] = json.dumps(calcExpression,
default=_date_handler)
if sqlFormat.lower() in ['native', 'standard']:
params['sqlFormat'] = sqlFormat.lower()
else:
params['sqlFormat'] = "standard"
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
########################################################################
class TableLayer(FeatureLayer):
"""Table object is exactly like FeatureLayer object"""
pass
|
apache-2.0
| -7,880,301,906,038,631,000
| 42.296502
| 105
| 0.482191
| false
| 5.572539
| false
| false
| false
|
alphacsc/alphacsc
|
alphacsc/other/swm.py
|
1
|
5148
|
"""
Code adopted from Voytek Lab package neurodsp:
https://github.com/voytekresearch/neurodsp/blob/master/neurodsp/shape/swm.py
The sliding window matching algorithm identifies the waveform shape of
neural oscillations using correlations.
"""
# Authors: Scott Cole
# Mainak Jas <mainak.jas@telecom-paristech.fr>
import numpy as np
from scipy.spatial.distance import pdist
from alphacsc.utils import check_random_state
def sliding_window_matching(x, L, G, max_iterations=500, T=1,
window_starts_custom=None, random_state=None):
"""Find recurring patterns in a time series using SWM algorithm.
Parameters
----------
x : array-like 1d
voltage time series
L : float
        window length, in samples
    G : float
        minimum window spacing, in samples
T : float
temperature parameter. Controls probability of accepting a new window
max_iterations : int
Maximum number of iterations of potential changes in window placement
window_starts_custom : np.ndarray (1d)
Pre-set locations of initial windows (instead of evenly spaced by 2G)
random_state : int
The random state
Returns
-------
avg_window : ndarray (1d)
The average waveform in x.
window_starts : ndarray (1d)
Indices at which each window begins for the final set of windows
J : np.ndarray (1d)
Cost function value at each iteration
References
----------
Gips, B., Bahramisharif, A., Lowet, E., Roberts, M. J., de Weerd, P.,
Jensen, O., & van der Eerden, J. (2017). Discovering recurring
patterns in electrophysiological recordings.
Journal of Neuroscience Methods, 275, 66-79.
MATLAB code: https://github.com/bartgips/SWM
Notes
-----
* Apply a highpass filter if looking at high frequency activity,
so that it does not converge on a low frequency motif
* L and G should be chosen to be about the size of the motif of interest
"""
rng = check_random_state(random_state)
# Initialize window positions, separated by 2*G
if window_starts_custom is None:
window_starts = np.arange(0, len(x) - L, 2 * G)
else:
window_starts = window_starts_custom
N_windows = len(window_starts)
# Calculate initial cost
J = np.zeros(max_iterations)
J[0] = _compute_J(x, window_starts, L)
# Randomly sample windows with replacement
random_window_idx = rng.choice(range(N_windows), size=max_iterations)
# For each iteration, randomly replace a window with a new window
# to improve cross-window similarity
for idx in range(1, max_iterations):
# Pick a random window position
window_idx_replace = random_window_idx[idx]
# Find a new allowed position for the window
window_starts_temp = np.copy(window_starts)
window_starts_temp[window_idx_replace] = _find_new_windowidx(
window_starts, G, L, len(x) - L, rng)
# Calculate the cost with replaced windows
J_temp = _compute_J(x, window_starts_temp, L)
# Calculate the change in cost function
deltaJ = J_temp - J[idx - 1]
# Calculate the acceptance probability
p_accept = np.exp(-deltaJ / float(T))
# Accept update to J with a certain probability
if rng.rand() < p_accept:
J[idx] = J_temp
# Update X
window_starts = window_starts_temp
else:
J[idx] = J[idx - 1]
print('[iter %03d] Cost function: %s' % (idx, J[idx]))
# Calculate average window
avg_window = np.zeros(L)
for w in range(N_windows):
avg_window += x[window_starts[w]:window_starts[w] + L]
avg_window = avg_window / float(N_windows)
return avg_window, window_starts, J
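# Usage sketch (illustrative values; x is a 1d signal and L, G are used as sample
# counts by the implementation above):
#   avg_win, starts, J = sliding_window_matching(x, L=100, G=50,
#                                                max_iterations=200, random_state=0)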
def _compute_J(x, window_starts, L):
"""Compute the cost, which is proportional to the
difference between pairs of windows"""
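    # Cost as implemented below: with z_i the z-scored window i of length L,
    #   J = sum_{i < j} ||z_i - z_j||^2 / (L * (N_windows - 1))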
# Get all windows and zscore them
N_windows = len(window_starts)
windows = np.zeros((N_windows, L))
for w in range(N_windows):
temp = x[window_starts[w]:window_starts[w] + L]
windows[w] = (temp - np.mean(temp)) / np.std(temp)
# Calculate distances for all pairs of windows
dist = pdist(np.vstack(windows),
lambda u, v: np.sum((u - v) ** 2))
J = np.sum(dist) / float(L * (N_windows - 1))
return J
def _find_new_windowidx(window_starts, G, L, N_samp, rng,
tries_limit=1000):
"""Find a new sample for the starting window"""
found = False
N_tries = 0
while found is False:
# Generate a random sample
new_samp = rng.randint(N_samp)
# Check how close the sample is to other window starts
dists = np.abs(window_starts - new_samp)
if np.min(dists) > G:
return new_samp
else:
N_tries += 1
if N_tries > tries_limit:
raise RuntimeError('SWM algorithm has difficulty finding a new'
' window. Increase the spacing parameter,'
' G.')
|
bsd-3-clause
| -5,241,638,121,785,414,000
| 32.428571
| 79
| 0.621018
| false
| 3.73314
| false
| false
| false
|
Peter-Liang/CodeWars-Python
|
solutions/Conway_s_Game_of_Life_Unlimited_Edition.py
|
1
|
1775
|
"""
Conway's Game of Life - Unlimited Edition
http://www.codewars.com/kata/52423db9add6f6fc39000354/train/python
"""
from copy import deepcopy
def get_generation(cells, generations):
origin = deepcopy(cells)
if generations == 0:
return origin
if generations > 1:
origin = get_generation(origin, generations - 1)
for row in origin:
row.insert(0, 0)
row.append(0)
origin.insert(0, [0] * len(origin[0]))
origin.append([0] * len(origin[0]))
result = deepcopy(origin)
for r in range(len(origin)):
for c in range(len(origin[0])):
neighbours = get_living_neighbours(origin, r, c)
if neighbours > 3 or neighbours < 2:
result[r][c] = 0
elif neighbours == 3:
result[r][c] = 1
trim_result(result)
return result
def trim_result(result):
while is_row_all_empty(result[0]):
result.pop(0)
while is_row_all_empty(result[-1]):
result.pop()
start_empty, end_empty = True, True
while start_empty or end_empty:
for r in result:
if r[0] != 0:
start_empty = False
if r[-1] != 0:
end_empty = False
for r in result:
if start_empty:
r.pop(0)
if end_empty:
r.pop()
def is_row_all_empty(row):
return sum(row) == 0
def get_living_neighbours(cells, row, col):
livings = 0
for r in [-1, 0, 1]:
if 0 <= row + r <= len(cells) - 1:
for c in [-1, 0, 1]:
if 0 <= col + c <= len(cells[0]) - 1:
if c == 0 and r == 0:
continue
livings += cells[row + r][col + c]
return livings
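# Hand-checked example: a horizontal blinker turns into a vertical one and the
# dead border is trimmed away.
#   get_generation([[1, 1, 1]], 1)  ->  [[1], [1], [1]]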
|
mit
| 7,685,390,107,174,797,000
| 25.117647
| 66
| 0.514366
| false
| 3.400383
| false
| false
| false
|
Mercy-Nekesa/sokoapp
|
sokoapp/tracking/utils.py
|
1
|
5688
|
import re
headers = ('HTTP_CLIENT_IP', 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED',
'HTTP_X_CLUSTERED_CLIENT_IP', 'HTTP_FORWARDED_FOR', 'HTTP_FORWARDED',
'REMOTE_ADDR')
# Back ported from Django trunk
# This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
def is_valid_ipv4_address(ip_str):
return bool(ipv4_re.match(ip_str))
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
if not is_valid_ipv4_address(hextet):
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
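# Quick sanity checks (illustrative, not part of the original module):
#   is_valid_ipv4_address('192.168.0.1')         -> True
#   is_valid_ipv6_address('::ffff:10.10.10.10')  -> True
#   is_valid_ipv6_address('1::2::3')             -> False  (more than one '::')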
def _sanitize_ipv4_mapping(ip_str):
"""
    Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
hextets = ip_str.split(':')
return hextets[-1]
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for _ in range(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
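# Example expansion (illustrative address):
#   _explode_shorthand_ip_string('2001:db8::1')
#   -> '2001:0db8:0000:0000:0000:0000:0000:0001'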
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if [x for x in ip_str.split(':') if len(x) < 4]:
return True
return False
def get_ip_address(request):
for header in headers:
if request.META.get(header, None):
ip = request.META[header].split(',')[0]
if ':' in ip and is_valid_ipv6_address(ip) or is_valid_ipv4_address(ip):
return ip
|
mit
| -1,081,139,503,090,599,600
| 27.582915
| 94
| 0.56962
| false
| 3.40395
| false
| false
| false
|
pymor/dune-hdd
|
examples/linearparabolic/morepas3__prepare.py
|
1
|
23451
|
#!/usr/bin/env python2
#
# This file is part of the dune-hdd project:
# https://github.com/pymor/dune-hdd
# Copyright Holders: Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import division, print_function
import numpy as np
from functools import partial
from pymor.algorithms.timestepping import ImplicitEulerTimeStepper
from pymor.core.logger import getLogger
from pymor.discretizations.basic import InstationaryDiscretization
from pymor.grids.oned import OnedGrid
from pymor.parameters.spaces import CubicParameterSpace
from pymor.vectorarrays.list import ListVectorArray
from dune.pymor.la.container import make_listvectorarray
from generic_multiscale import dune_module, examples, wrapper
logger = getLogger('.morepas3.prepare')
logger.setLevel('INFO')
class InstationaryDuneVisualizer(object):
def __init__(self, disc, prefix):
self.disc = disc
self.prefix = prefix
def visualize(self, U, *args, **kwargs):
import numpy as np
dune_disc = self.disc._impl
assert isinstance(U, ListVectorArray)
filename = kwargs['filename'] if 'filename' in kwargs else self.prefix
size = len(U)
pad = len(str(size))
for ss in np.arange(size):
dune_disc.visualize(U._list[ss]._impl,
filename + '_' + str(ss).zfill(pad),
'solution',
False) # do not add dirichlet shift
def bochner_norm(T, space_norm2, U, mu=None, order=2):
'''
L^2-in-time, X-in-space
'''
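    # Quantity approximated below (restating the code): the returned value is
    #   sqrt( integral_0^T space_norm2(U(t), mu) dt ),
    # with U(t) interpolated linearly between consecutive snapshots in U._list and
    # the time integral evaluated per interval by a quadrature rule of the given order.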
nt = len(U)
time_grid = OnedGrid(domain=(0., T), num_intervals=nt-1)
assert len(U) == time_grid.size(1)
qq = time_grid.quadrature_points(0, order=order)
integral = 0.
for entity in np.arange(time_grid.size(0)):
# get quadrature
qq_e = qq[entity] # points
ww = time_grid.reference_element.quadrature(order)[1] # weights
ie = time_grid.integration_elements(0)[entity] # integration element
# create shape function evaluations
a = time_grid.centers(1)[entity]
b = time_grid.centers(1)[entity + 1]
SF = np.array((1./(a - b)*qq_e[..., 0] - b/(a - b),
1./(b - a)*qq_e[..., 0] - a/(b - a)))
U_a = U._list[entity]
U_b = U._list[entity + 1]
values = np.zeros(len(qq_e))
for ii in np.arange(len(qq_e)):
# compute U(t)
U_t = U_a.copy()
U_t.scal(SF[0][ii])
U_t.axpy(SF[1][ii], U_b)
# compute the X-norm of U(t)
values[ii] = space_norm2(make_listvectorarray(U_t), mu)
integral += np.dot(values, ww)*ie
return np.sqrt(integral)
def discretize(num_elements, num_partitions, T, nt, initial_data, parameter_range, name='detailed discretization'):
Example = examples[2]['aluconformgrid']['fem']['istl']
logger_cfg = Example.logger_options()
logger_cfg.set('info', -1, True)
logger_cfg.set('info_color', 'blue', True)
grid_cfg = Example.grid_options('grid.multiscale.provider.cube')
grid_cfg.set('lower_left', '[0 0]', True)
grid_cfg.set('upper_right', '[5 1]', True)
grid_cfg.set('num_elements', num_elements, True)
grid_cfg.set('num_partitions', num_partitions, True)
boundary_cfg = Example.boundary_options('stuff.grid.boundaryinfo.alldirichlet')
problem_cfg = Example.problem_options('hdd.linearelliptic.problem.OS2015.spe10model1')
problem_cfg.set('parametric_channel', 'true', True)
problem_cfg.set('channel_boundary_layer', '0', True)
problem_cfg.set('filename', 'perm_case1.dat', True)
problem_cfg.set('lower_left', '[0 0]', True)
problem_cfg.set('upper_right', '[5 1]', True)
problem_cfg.set('num_elements', '[100 20]', True)
problem_cfg.set('forces.0.domain', '[0.95 1.10; 0.30 0.45]', True)
problem_cfg.set('forces.0.value', '2000', True)
problem_cfg.set('forces.1.domain', '[3.00 3.15; 0.75 0.90]', True)
problem_cfg.set('forces.1.value', '-1000', True)
problem_cfg.set('forces.2.domain', '[4.25 4.40; 0.25 0.40]', True)
problem_cfg.set('forces.2.value', '-1000', True)
problem_cfg.set('channel.0.value', '-1.07763239495', True)
problem_cfg.set('channel.1.value', '-1.07699512772', True)
problem_cfg.set('channel.2.value', '-1.07356156439', True)
problem_cfg.set('channel.3.value', '-1.06602281736', True)
problem_cfg.set('channel.4.value', '-1.06503683743', True)
problem_cfg.set('channel.5.value', '-1.07974870426', True)
problem_cfg.set('channel.6.value', '-1.05665895923', True)
problem_cfg.set('channel.7.value', '-1.08310334837', True)
problem_cfg.set('channel.8.value', '-1.05865484973', True)
problem_cfg.set('channel.9.value', '-1.05871039535', True)
problem_cfg.set('channel.10.value', '-1.08136695901', True)
problem_cfg.set('channel.11.value', '-1.08490172721', True)
problem_cfg.set('channel.12.value', '-1.06641120758', True)
problem_cfg.set('channel.13.value', '-1.06812773298', True)
problem_cfg.set('channel.14.value', '-1.07695652049', True)
problem_cfg.set('channel.15.value', '-1.08630079205', True)
problem_cfg.set('channel.16.value', '-1.08273722112', True)
problem_cfg.set('channel.17.value', '-1.07500402155', True)
problem_cfg.set('channel.18.value', '-1.08607142562', True)
problem_cfg.set('channel.19.value', '-1.07268761799', True)
problem_cfg.set('channel.20.value', '-1.08537037362', True)
problem_cfg.set('channel.21.value', '-1.08466927273', True)
problem_cfg.set('channel.22.value', '-1.08444661815', True)
problem_cfg.set('channel.23.value', '-1.08957037967', True)
problem_cfg.set('channel.24.value', '-1.08047394052', True)
problem_cfg.set('channel.25.value', '-1.08221229083', True)
problem_cfg.set('channel.26.value', '-1.08568599863', True)
problem_cfg.set('channel.27.value', '-1.08428347872', True)
problem_cfg.set('channel.28.value', '-1.09104098734', True)
problem_cfg.set('channel.29.value', '-1.09492700673', True)
problem_cfg.set('channel.30.value', '-1.09760440537', True)
problem_cfg.set('channel.31.value', '-1.09644989453', True)
problem_cfg.set('channel.32.value', '-1.09441681025', True)
problem_cfg.set('channel.33.value', '-1.09533290654', True)
problem_cfg.set('channel.34.value', '-1.1001430808', True)
problem_cfg.set('channel.35.value', '-1.10065627621', True)
problem_cfg.set('channel.36.value', '-1.10125877186', True)
problem_cfg.set('channel.37.value', '-1.10057485893', True)
problem_cfg.set('channel.38.value', '-1.10002261906', True)
problem_cfg.set('channel.39.value', '-1.10219154209', True)
problem_cfg.set('channel.40.value', '-1.09994463801', True)
problem_cfg.set('channel.41.value', '-1.10265630533', True)
problem_cfg.set('channel.42.value', '-1.10448566526', True)
problem_cfg.set('channel.43.value', '-1.10735820121', True)
problem_cfg.set('channel.44.value', '-1.1070022367', True)
problem_cfg.set('channel.45.value', '-1.10777650387', True)
problem_cfg.set('channel.46.value', '-1.10892785562', True)
problem_cfg.set('channel.0.domain', '[1.7 1.75; 0.5 0.55]', True)
problem_cfg.set('channel.1.domain', '[1.75 1.8; 0.5 0.55]', True)
problem_cfg.set('channel.2.domain', '[1.8 1.85; 0.5 0.55]', True)
problem_cfg.set('channel.3.domain', '[1.85 1.9; 0.5 0.55]', True)
problem_cfg.set('channel.4.domain', '[1.9 1.95; 0.5 0.55]', True)
problem_cfg.set('channel.5.domain', '[1.95 2.0; 0.5 0.55]', True)
problem_cfg.set('channel.6.domain', '[2.0 2.05; 0.5 0.55]', True)
problem_cfg.set('channel.7.domain', '[2.05 2.1; 0.5 0.55]', True)
problem_cfg.set('channel.8.domain', '[2.1 2.15; 0.5 0.55]', True)
problem_cfg.set('channel.9.domain', '[2.15 2.2; 0.5 0.55]', True)
problem_cfg.set('channel.10.domain', '[2.2 2.25; 0.5 0.55]', True)
problem_cfg.set('channel.11.domain', '[2.25 2.3; 0.5 0.55]', True)
problem_cfg.set('channel.12.domain', '[2.3 2.35; 0.5 0.55]', True)
problem_cfg.set('channel.13.domain', '[2.35 2.4; 0.5 0.55]', True)
problem_cfg.set('channel.14.domain', '[2.4 2.45; 0.5 0.55]', True)
problem_cfg.set('channel.15.domain', '[2.45 2.5; 0.5 0.55]', True)
problem_cfg.set('channel.16.domain', '[2.5 2.55; 0.5 0.55]', True)
problem_cfg.set('channel.17.domain', '[2.55 2.6; 0.5 0.55]', True)
problem_cfg.set('channel.18.domain', '[2.6 2.65; 0.5 0.55]', True)
problem_cfg.set('channel.19.domain', '[2.65 2.7; 0.5 0.55]', True)
problem_cfg.set('channel.20.domain', '[2.7 2.75; 0.5 0.55]', True)
problem_cfg.set('channel.21.domain', '[2.75 2.8; 0.5 0.55]', True)
problem_cfg.set('channel.22.domain', '[2.8 2.85; 0.5 0.55]', True)
problem_cfg.set('channel.23.domain', '[2.85 2.9; 0.5 0.55]', True)
problem_cfg.set('channel.24.domain', '[2.9 2.95; 0.5 0.55]', True)
problem_cfg.set('channel.25.domain', '[2.95 3.0; 0.5 0.55]', True)
problem_cfg.set('channel.26.domain', '[3.0 3.05; 0.5 0.55]', True)
problem_cfg.set('channel.27.domain', '[3.05 3.1; 0.5 0.55]', True)
problem_cfg.set('channel.28.domain', '[3.1 3.15; 0.5 0.55]', True)
problem_cfg.set('channel.29.domain', '[3.15 3.2; 0.5 0.55]', True)
problem_cfg.set('channel.30.domain', '[3.2 3.25; 0.5 0.55]', True)
problem_cfg.set('channel.31.domain', '[3.25 3.3; 0.5 0.55]', True)
problem_cfg.set('channel.32.domain', '[3.3 3.35; 0.5 0.55]', True)
problem_cfg.set('channel.33.domain', '[3.35 3.4; 0.5 0.55]', True)
problem_cfg.set('channel.34.domain', '[3.4 3.45; 0.5 0.55]', True)
problem_cfg.set('channel.35.domain', '[3.45 3.5; 0.5 0.55]', True)
problem_cfg.set('channel.36.domain', '[3.5 3.55; 0.5 0.55]', True)
problem_cfg.set('channel.37.domain', '[3.55 3.6; 0.5 0.55]', True)
problem_cfg.set('channel.38.domain', '[3.6 3.65; 0.5 0.55]', True)
problem_cfg.set('channel.39.domain', '[3.65 3.7; 0.5 0.55]', True)
problem_cfg.set('channel.40.domain', '[3.7 3.75; 0.5 0.55]', True)
problem_cfg.set('channel.41.domain', '[3.75 3.8; 0.5 0.55]', True)
problem_cfg.set('channel.42.domain', '[3.8 3.85; 0.5 0.55]', True)
problem_cfg.set('channel.43.domain', '[3.85 3.9; 0.5 0.55]', True)
problem_cfg.set('channel.44.domain', '[3.9 3.95; 0.5 0.55]', True)
problem_cfg.set('channel.45.domain', '[3.95 4.0; 0.5 0.55]', True)
problem_cfg.set('channel.46.domain', '[4.0 4.05; 0.5 0.55]', True)
problem_cfg.set('channel.47.value', '-1.10372589211', True)
problem_cfg.set('channel.48.value', '-1.1020889988', True)
problem_cfg.set('channel.49.value', '-1.09806955069', True)
problem_cfg.set('channel.50.value', '-1.10000902421', True)
problem_cfg.set('channel.51.value', '-1.08797468724', True)
problem_cfg.set('channel.52.value', '-1.08827472176', True)
problem_cfg.set('channel.53.value', '-1.08692237109', True)
problem_cfg.set('channel.54.value', '-1.07893190093', True)
problem_cfg.set('channel.55.value', '-1.08748373853', True)
problem_cfg.set('channel.56.value', '-1.07445197324', True)
problem_cfg.set('channel.57.value', '-1.08246613163', True)
problem_cfg.set('channel.58.value', '-1.06726790504', True)
problem_cfg.set('channel.59.value', '-1.07891217847', True)
problem_cfg.set('channel.60.value', '-1.07260827126', True)
problem_cfg.set('channel.61.value', '-1.07094062748', True)
problem_cfg.set('channel.62.value', '-1.0692399429', True)
problem_cfg.set('channel.63.value', '-1.00099885701', True)
problem_cfg.set('channel.64.value', '-1.00109544002', True)
problem_cfg.set('channel.65.value', '-0.966491003242', True)
problem_cfg.set('channel.66.value', '-0.802284684014', True)
problem_cfg.set('channel.67.value', '-0.980790923021', True)
problem_cfg.set('channel.68.value', '-0.614478271687', True)
problem_cfg.set('channel.69.value', '-0.288129858959', True)
problem_cfg.set('channel.70.value', '-0.929509396842', True)
problem_cfg.set('channel.71.value', '-0.992376505995', True)
problem_cfg.set('channel.72.value', '-0.968162494855', True)
problem_cfg.set('channel.73.value', '-0.397316938901', True)
problem_cfg.set('channel.74.value', '-0.970934956609', True)
problem_cfg.set('channel.75.value', '-0.784344730096', True)
problem_cfg.set('channel.76.value', '-0.539725422323', True)
problem_cfg.set('channel.77.value', '-0.915632282372', True)
problem_cfg.set('channel.78.value', '-0.275089177273', True)
problem_cfg.set('channel.79.value', '-0.949684959286', True)
problem_cfg.set('channel.80.value', '-0.936132529794', True)
problem_cfg.set('channel.47.domain', '[2.6 2.65; 0.45 0.50]', True)
problem_cfg.set('channel.48.domain', '[2.65 2.7; 0.45 0.50]', True)
problem_cfg.set('channel.49.domain', '[2.7 2.75; 0.45 0.50]', True)
problem_cfg.set('channel.50.domain', '[2.75 2.8; 0.45 0.50]', True)
problem_cfg.set('channel.51.domain', '[2.8 2.85; 0.45 0.50]', True)
problem_cfg.set('channel.52.domain', '[2.85 2.9; 0.45 0.50]', True)
problem_cfg.set('channel.53.domain', '[2.9 2.95; 0.45 0.50]', True)
problem_cfg.set('channel.54.domain', '[2.95 3.0; 0.45 0.50]', True)
problem_cfg.set('channel.55.domain', '[3.0 3.05; 0.45 0.50]', True)
problem_cfg.set('channel.56.domain', '[3.05 3.1; 0.45 0.50]', True)
problem_cfg.set('channel.57.domain', '[3.1 3.15; 0.45 0.50]', True)
problem_cfg.set('channel.58.domain', '[3.15 3.2; 0.45 0.50]', True)
problem_cfg.set('channel.59.domain', '[3.2 3.25; 0.45 0.50]', True)
problem_cfg.set('channel.60.domain', '[3.25 3.3; 0.45 0.50]', True)
problem_cfg.set('channel.61.domain', '[3.3 3.35; 0.45 0.50]', True)
problem_cfg.set('channel.62.domain', '[3.35 3.4; 0.45 0.50]', True)
problem_cfg.set('channel.63.domain', '[3.4 3.45; 0.45 0.50]', True)
problem_cfg.set('channel.64.domain', '[3.45 3.5; 0.45 0.50]', True)
problem_cfg.set('channel.65.domain', '[3.5 3.55; 0.45 0.50]', True)
problem_cfg.set('channel.66.domain', '[3.55 3.6; 0.45 0.50]', True)
problem_cfg.set('channel.67.domain', '[3.6 3.65; 0.45 0.50]', True)
problem_cfg.set('channel.68.domain', '[3.65 3.7; 0.45 0.50]', True)
problem_cfg.set('channel.69.domain', '[3.7 3.75; 0.45 0.50]', True)
problem_cfg.set('channel.70.domain', '[3.75 3.8; 0.45 0.50]', True)
problem_cfg.set('channel.71.domain', '[3.8 3.85; 0.45 0.50]', True)
problem_cfg.set('channel.72.domain', '[3.85 3.9; 0.45 0.50]', True)
problem_cfg.set('channel.73.domain', '[3.9 3.95; 0.45 0.50]', True)
problem_cfg.set('channel.74.domain', '[3.95 4.0; 0.45 0.50]', True)
problem_cfg.set('channel.75.domain', '[4.0 4.05; 0.45 0.50]', True)
problem_cfg.set('channel.76.domain', '[4.05 4.1; 0.45 0.50]', True)
problem_cfg.set('channel.77.domain', '[4.1 4.15; 0.45 0.50]', True)
problem_cfg.set('channel.78.domain', '[4.15 4.2; 0.45 0.50]', True)
problem_cfg.set('channel.79.domain', '[4.2 4.25; 0.45 0.50]', True)
problem_cfg.set('channel.80.domain', '[4.25 4.3; 0.45 0.50]', True)
problem_cfg.set('channel.81.value', '-1.10923642795', True)
problem_cfg.set('channel.82.value', '-1.10685618623', True)
problem_cfg.set('channel.83.value', '-1.1057800376', True)
problem_cfg.set('channel.84.value', '-1.10187723629', True)
problem_cfg.set('channel.85.value', '-1.10351710464', True)
problem_cfg.set('channel.86.value', '-1.10037551137', True)
problem_cfg.set('channel.87.value', '-1.09724407076', True)
problem_cfg.set('channel.88.value', '-1.09604600208', True)
problem_cfg.set('channel.89.value', '-1.09354469656', True)
problem_cfg.set('channel.90.value', '-1.08934455354', True)
problem_cfg.set('channel.91.value', '-1.08155476586', True)
problem_cfg.set('channel.92.value', '-1.07815397899', True)
problem_cfg.set('channel.93.value', '-1.09174062023', True)
problem_cfg.set('channel.94.value', '-1.07433616068', True)
problem_cfg.set('channel.95.value', '-1.08030587701', True)
problem_cfg.set('channel.81.domain', '[1.95 2.0; 0.40 0.45]', True)
problem_cfg.set('channel.82.domain', '[2.0 2.05; 0.40 0.45]', True)
problem_cfg.set('channel.83.domain', '[2.05 2.1; 0.40 0.45]', True)
problem_cfg.set('channel.84.domain', '[2.1 2.15; 0.40 0.45]', True)
problem_cfg.set('channel.85.domain', '[2.15 2.2; 0.40 0.45]', True)
problem_cfg.set('channel.86.domain', '[2.2 2.25; 0.40 0.45]', True)
problem_cfg.set('channel.87.domain', '[2.25 2.3; 0.40 0.45]', True)
problem_cfg.set('channel.88.domain', '[2.3 2.35; 0.40 0.45]', True)
problem_cfg.set('channel.89.domain', '[2.35 2.4; 0.40 0.45]', True)
problem_cfg.set('channel.90.domain', '[2.4 2.45; 0.40 0.45]', True)
problem_cfg.set('channel.91.domain', '[2.45 2.5; 0.40 0.45]', True)
problem_cfg.set('channel.92.domain', '[2.5 2.55; 0.40 0.45]', True)
problem_cfg.set('channel.93.domain', '[2.55 2.6; 0.40 0.45]', True)
problem_cfg.set('channel.94.domain', '[2.6 2.65; 0.40 0.45]', True)
problem_cfg.set('channel.95.domain', '[2.65 2.7; 0.40 0.45]', True)
problem_cfg.set('channel.96.value', '-1.00032869407', True)
problem_cfg.set('channel.97.value', '-1.01175908905', True)
problem_cfg.set('channel.98.value', '-1.04954395793', True)
problem_cfg.set('channel.99.value', '-1.017967697', True)
problem_cfg.set('channel.100.value', '-1.04647184091', True)
problem_cfg.set('channel.101.value', '-1.01911894831', True)
problem_cfg.set('channel.102.value', '-1.00699340158', True)
problem_cfg.set('channel.103.value', '-0.995492960025', True)
problem_cfg.set('channel.104.value', '-1.0373059007', True)
problem_cfg.set('channel.96.domain', '[2.25 2.3; 0.35 0.40]', True)
problem_cfg.set('channel.97.domain', '[2.3 2.35; 0.35 0.40]', True)
problem_cfg.set('channel.98.domain', '[2.35 2.4; 0.35 0.40]', True)
problem_cfg.set('channel.99.domain', '[2.4 2.45; 0.35 0.40]', True)
problem_cfg.set('channel.100.domain', '[2.45 2.5; 0.35 0.40]', True)
problem_cfg.set('channel.101.domain', '[2.5 2.55; 0.35 0.40]', True)
problem_cfg.set('channel.102.domain', '[2.55 2.6; 0.35 0.40]', True)
problem_cfg.set('channel.103.domain', '[2.6 2.65; 0.35 0.40]', True)
problem_cfg.set('channel.104.domain', '[2.65 2.7; 0.35 0.4]', True)
example = Example(logger_cfg, grid_cfg, boundary_cfg, problem_cfg, ['l2', 'h1', 'elliptic_penalty'])
elliptic_LRBMS_disc = wrapper[example.discretization()]
parameter_space = CubicParameterSpace(elliptic_LRBMS_disc.parameter_type, parameter_range[0], parameter_range[1])
elliptic_LRBMS_disc = elliptic_LRBMS_disc.with_(parameter_space=parameter_space)
elliptic_disc = elliptic_LRBMS_disc.as_nonblocked().with_(parameter_space=parameter_space)
def prolong(coarse_disc, coarse_U):
time_grid_ref = OnedGrid(domain=(0., T), num_intervals=nt)
time_grid = OnedGrid(domain=(0., T), num_intervals=(len(coarse_U) - 1))
U_fine = [None for ii in time_grid_ref.centers(1)]
for n in np.arange(len(time_grid_ref.centers(1))):
t_n = time_grid_ref.centers(1)[n]
coarse_entity = min((time_grid.centers(1) <= t_n).nonzero()[0][-1],
time_grid.size(0) - 1)
a = time_grid.centers(1)[coarse_entity]
b = time_grid.centers(1)[coarse_entity + 1]
SF = np.array((1./(a - b)*t_n - b/(a - b),
1./(b - a)*t_n - a/(b - a)))
U_t = coarse_U.copy(ind=coarse_entity)
U_t.scal(SF[0][0])
U_t.axpy(SF[1][0], coarse_U, x_ind=(coarse_entity + 1))
U_fine[n] = wrapper[example.prolong(coarse_disc._impl, U_t._list[0]._impl)]
return make_listvectorarray(U_fine)
if isinstance(initial_data, str):
initial_data = make_listvectorarray(wrapper[example.project(initial_data)])
# initial_data = elliptic_disc.operator.apply_inverse(initial_data, mu=(1, 1))
else:
coarse_disc = initial_data[0]
initial_data = initial_data[1]
assert len(initial_data) == 1
initial_data = example.prolong(coarse_disc._impl, initial_data._list[0]._impl)
initial_data = make_listvectorarray(wrapper[initial_data])
parabolic_disc = InstationaryDiscretization(T=T,
initial_data=initial_data,
operator=elliptic_disc.operator,
rhs=elliptic_disc.rhs,
mass=elliptic_disc.products['l2'],
time_stepper=ImplicitEulerTimeStepper(nt, solver_options='operator'),
products=elliptic_disc.products,
operators=elliptic_disc.operators,
functionals=elliptic_disc.functionals,
vector_operators=elliptic_disc.vector_operators,
visualizer=InstationaryDuneVisualizer(elliptic_disc, 'dune_discretization.solution'),
parameter_space=parameter_space,
cache_region='disk',
name='{} ({} DoFs)'.format(name, elliptic_disc.solution_space.dim))
return {'example': example,
'initial_data': initial_data,
'wrapper': wrapper,
'elliptic_LRBMS_disc': elliptic_LRBMS_disc,
'elliptic_disc': elliptic_disc,
'parabolic_disc': parabolic_disc,
'prolongator': prolong}
def prepare(cfg):
detailed_data = discretize(cfg['dune_num_elements'], cfg['dune_num_partitions'], cfg['end_time'], cfg['nt'],
cfg['initial_data'], (cfg['mu_min'], cfg['mu_max']))
wrapper, parabolic_disc = detailed_data['wrapper'], detailed_data['parabolic_disc']
logger.info('creating products and norms ...')
for tp in ('mu_bar', 'mu_hat', 'mu_tilde', 'mu_min', 'mu_max'):
detailed_data[tp] = parabolic_disc.parse_parameter(cfg[tp])
space_products = {}
for kk, prod in parabolic_disc.products.items():
space_products[kk] = prod
if prod.parametric:
for tp in 'mu_bar', 'mu_hat', 'mu_tilde':
mu = wrapper.dune_parameter(detailed_data[tp])
space_products['{}_{}'.format(kk, tp)] = wrapper[prod._impl.freeze_parameter(mu)]
def create_norm2(prod):
def norm2(U, mu=None):
return prod.apply2(U, U, mu=mu)[0][0]
return norm2
space_norms2 = {kk: create_norm2(prod)
for kk, prod in space_products.items()}
def create_bochner_norm(space_norm2):
return partial(bochner_norm, cfg['end_time'], space_norm2, order=cfg['integration_order_time'])
bochner_norms = {kk: create_bochner_norm(space_norm2)
for kk, space_norm2 in space_norms2.items()}
detailed_data['space_products'] = space_products
detailed_data['space_norms2'] = space_norms2
detailed_data['bochner_norms'] = bochner_norms
return detailed_data
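# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the `bochner_norms`
# assembled above wrap a spatial norm with a quadrature in time. A minimal
# stand-alone variant of such a norm could look as follows; the function
# name, the trapezoidal weights and the assumption that U holds one snapshot
# per time step are illustrative only.
def trapezoidal_bochner_norm(T, space_norm2, U, mu=None):
    """Approximate (int_0^T ||U(t)||^2 dt)^(1/2) on a uniform time grid."""
    import numpy as np
    nt = len(U) - 1
    dt = T / float(nt)
    values = np.array([space_norm2(U.copy(ind=n), mu=mu) for n in range(len(U))])
    weights = np.full(len(U), dt)
    weights[0] *= 0.5
    weights[-1] *= 0.5
    return np.sqrt(np.sum(weights * values))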
|
bsd-2-clause
| -5,464,840,250,336,528,000
| 54.835714
| 133
| 0.612255
| false
| 2.81322
| false
| false
| false
|
wateraccounting/wa
|
Collect/MOD9/DataAccess.py
|
1
|
12824
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2016
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/MOD9
"""
# import general python modules
import os
import numpy as np
import pandas as pd
import gdal
import urllib
import urllib2
from bs4 import BeautifulSoup
import re
import urlparse
import glob
import requests
from joblib import Parallel, delayed
# Water Accounting modules
import wa
import wa.General.raster_conversions as RC
import wa.General.data_conversions as DC
from wa import WebAccounts
def DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar, cores, hdf_library, remove_hdf):
"""
This function downloads MOD9 daily data
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -90 and 90)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
cores -- The number of cores used to run the routine. It can be 'False'
to avoid using parallel computing routines.
Waitbar -- 1 (Default) will print a waitbar
"""
# Check start and end date and otherwise set the date to max
if not Startdate:
Startdate = pd.Timestamp('2000-02-24')
if not Enddate:
Enddate = pd.Timestamp('Now')
# Make an array of the days of which the NDVI is taken
Dates = pd.date_range(Startdate, Enddate, freq = 'D')
# Create Waitbar
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -90 or latlim[1] > 90:
print 'Latitude above 90N or below 90S is not possible. Value set to maximum'
        latlim[0] = np.maximum(latlim[0], -90)
        latlim[1] = np.minimum(latlim[1], 90)
if lonlim[0] < -180 or lonlim[1] > 180:
print 'Longitude must be between 180E and 180W. Now value is set to maximum'
        lonlim[0] = np.maximum(lonlim[0], -180)
        lonlim[1] = np.minimum(lonlim[1], 180)
# Make directory for the MODIS NDVI data
Dir = Dir.replace("/", os.sep)
output_folder = os.path.join(Dir, 'Reflectance', 'MOD9')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
TilesVertical, TilesHorizontal = wa.Collect.MOD15.DataAccess.Get_tiles_from_txt(output_folder, hdf_library, latlim, lonlim)
# Pass variables to parallel function and run
args = [output_folder, TilesVertical, TilesHorizontal, lonlim, latlim, hdf_library]
if not cores:
for Date in Dates:
RetrieveData(Date, args)
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
results = True
else:
results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)
for Date in Dates)
if remove_hdf == 1:
# Remove all .hdf files
os.chdir(output_folder)
files = glob.glob("*.hdf")
for f in files:
os.remove(os.path.join(output_folder, f))
# Remove all .txt files
files = glob.glob("*.txt")
for f in files:
os.remove(os.path.join(output_folder, f))
return results
def RetrieveData(Date, args):
"""
This function retrieves MOD9 Reflectance data for a given date from the
http://e4ftl01.cr.usgs.gov/ server.
Keyword arguments:
Date -- 'yyyy-mm-dd'
args -- A list of parameters defined in the DownloadData function.
"""
# Argument
[output_folder, TilesVertical, TilesHorizontal, lonlim, latlim, hdf_library] = args
# Collect the data from the MODIS webpage and returns the data and lat and long in meters of those tiles
try:
Collect_data(TilesHorizontal, TilesVertical, Date, output_folder, hdf_library)
except:
print "Was not able to download the file"
# Define the output name of the collect data function
name_collect = os.path.join(output_folder, 'Merged.tif')
# Reproject the MODIS product to epsg_to
epsg_to ='4326'
name_reprojected = RC.reproject_MODIS(name_collect, epsg_to)
# Clip the data to the users extend
data, geo = RC.clip_data(name_reprojected, latlim, lonlim)
# Save results as Gtiff
ReffileName = os.path.join(output_folder, 'Reflectance_MOD09GQ_-_daily_' + Date.strftime('%Y') + '.' + Date.strftime('%m') + '.' + Date.strftime('%d') + '.tif')
DC.Save_as_tiff(name=ReffileName, data=data, geo=geo, projection='WGS84')
# remove the side products
os.remove(os.path.join(output_folder, name_collect))
os.remove(os.path.join(output_folder, name_reprojected))
return True
def Collect_data(TilesHorizontal,TilesVertical,Date,output_folder, hdf_library):
'''
This function downloads all the needed MODIS tiles from http://e4ftl01.cr.usgs.gov/MOLT/MOD13Q1.006/ as a hdf file.
Keywords arguments:
TilesHorizontal -- [TileMin,TileMax] max and min horizontal tile number
TilesVertical -- [TileMin,TileMax] max and min vertical tile number
Date -- 'yyyy-mm-dd'
output_folder -- 'C:/file/to/path/'
'''
# Make a new tile for the data
sizeX = int((TilesHorizontal[1] - TilesHorizontal[0] + 1) * 4800)
sizeY = int((TilesVertical[1] - TilesVertical[0] + 1) * 4800)
DataTot = np.zeros((sizeY, sizeX))
# Load accounts
username, password = WebAccounts.Accounts(Type = 'NASA')
# Create the Lat and Long of the MODIS tile in meters
for Vertical in range(int(TilesVertical[0]), int(TilesVertical[1])+1):
Distance = 231.65635826395834 # resolution of a MODIS pixel in meter
countY=(TilesVertical[1] - TilesVertical[0] + 1) - (Vertical - TilesVertical[0])
for Horizontal in range(int(TilesHorizontal[0]), int(TilesHorizontal[1]) + 1):
countX=Horizontal - TilesHorizontal[0] + 1
# Download the MODIS NDVI data
url = 'https://e4ftl01.cr.usgs.gov/MOLT/MOD09GQ.006/' + Date.strftime('%Y') + '.' + Date.strftime('%m') + '.' + Date.strftime('%d') + '/'
# Reset the begin parameters for downloading
downloaded = 0
N=0
# Check the library given by user
if hdf_library is not None:
os.chdir(hdf_library)
hdf_name = glob.glob("MOD09GQ.A%s%03s.h%02dv%02d.*" %(Date.strftime('%Y'), Date.strftime('%j'), Horizontal, Vertical))
if len(hdf_name) == 1:
hdf_file = os.path.join(hdf_library, hdf_name[0])
if os.path.exists(hdf_file):
downloaded = 1
file_name = hdf_file
if not downloaded == 1:
# Get files on FTP server
f = urllib2.urlopen(url)
# Sum all the files on the server
soup = BeautifulSoup(f, "lxml")
for i in soup.findAll('a', attrs = {'href': re.compile('(?i)(hdf)$')}):
# Find the file with the wanted tile number
Vfile=str(i)[30:32]
Hfile=str(i)[27:29]
if int(Vfile) is int(Vertical) and int(Hfile) is int(Horizontal):
# Define the whole url name
full_url = urlparse.urljoin(url, i['href'])
# if not downloaded try to download file
while downloaded == 0:
try:# open http and download whole .hdf
nameDownload = full_url
file_name = os.path.join(output_folder,nameDownload.split('/')[-1])
if os.path.isfile(file_name):
downloaded = 1
else:
x = requests.get(nameDownload, allow_redirects = False)
try:
y = requests.get(x.headers['location'], auth = (username, password))
except:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
y = requests.get(x.headers['location'], auth = (username, password), verify = False)
z = open(file_name, 'wb')
z.write(y.content)
z.close()
statinfo = os.stat(file_name)
# Say that download was succesfull
if int(statinfo.st_size) > 10000:
downloaded = 1
# If download was not succesfull
except:
# Try another time
N = N + 1
# Stop trying after 10 times
if N == 10:
print 'Data from ' + Date.strftime('%Y-%m-%d') + ' is not available'
downloaded = 1
try:
# Open .hdf only band with NDVI and collect all tiles to one array
dataset = gdal.Open(file_name)
sdsdict = dataset.GetMetadata('SUBDATASETS')
sdslist = [sdsdict[k] for k in sdsdict.keys() if '_2_NAME' in k]
sds = []
for n in sdslist:
sds.append(gdal.Open(n))
full_layer = [i for i in sdslist if 'sur_refl_b01_1' in i]
idx = sdslist.index(full_layer[0])
if Horizontal == TilesHorizontal[0] and Vertical == TilesVertical[0]:
geo_t = sds[idx].GetGeoTransform()
# get the projection value
proj = sds[idx].GetProjection()
data = sds[idx].ReadAsArray()
countYdata = (TilesVertical[1] - TilesVertical[0] + 2) - countY
DataTot[int((countYdata - 1) * 4800):int(countYdata * 4800), int((countX - 1) * 4800):int(countX * 4800)]=data
del data
# if the tile not exists or cannot be opened, create a nan array with the right projection
except:
if Horizontal==TilesHorizontal[0] and Vertical==TilesVertical[0]:
x1 = (TilesHorizontal[0] - 19) * 4800 * Distance
x4 = (TilesVertical[0] - 9) * 4800 * -1 * Distance
geo = [x1, Distance, 0.0, x4, 0.0, -Distance]
geo_t=tuple(geo)
proj='PROJCS["unnamed",GEOGCS["Unknown datum based upon the custom spheroid",DATUM["Not specified (based on custom spheroid)",SPHEROID["Custom spheroid",6371007.181,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Sinusoidal"],PARAMETER["longitude_of_center",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]'
data=np.ones((4800,4800)) * (-9999)
countYdata=(TilesVertical[1] - TilesVertical[0] + 2) - countY
DataTot[(countYdata - 1) * 4800:countYdata * 4800,(countX - 1) * 4800:countX * 4800] = data
# Make geotiff file
name2 = os.path.join(output_folder, 'Merged.tif')
driver = gdal.GetDriverByName("GTiff")
dst_ds = driver.Create(name2, DataTot.shape[1], DataTot.shape[0], 1, gdal.GDT_Float32, ['COMPRESS=LZW'])
try:
dst_ds.SetProjection(proj)
except:
proj='PROJCS["unnamed",GEOGCS["Unknown datum based upon the custom spheroid",DATUM["Not specified (based on custom spheroid)",SPHEROID["Custom spheroid",6371007.181,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Sinusoidal"],PARAMETER["longitude_of_center",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]'
x1 = (TilesHorizontal[0] - 18) * 4800 * Distance
x4 = (TilesVertical[0] - 9) * 4800 * -1 * Distance
geo = [x1, Distance, 0.0, x4, 0.0, -Distance]
geo_t = tuple(geo)
dst_ds.SetProjection(proj)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.SetGeoTransform(geo_t)
dst_ds.GetRasterBand(1).WriteArray(DataTot*0.0001)
dst_ds = None
sds = None
return()
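# --------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module. The values
# below (output directory, date range and bounding box) are made-up example
# inputs matching the parameter description in the DownloadData docstring.
if __name__ == '__main__':
    DownloadData(Dir=r'C:/MOD9_data/',
                 Startdate='2016-01-01', Enddate='2016-01-05',
                 latlim=[29.0, 31.5], lonlim=[30.0, 32.5],
                 Waitbar=1, cores=False, hdf_library=None, remove_hdf=1)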
|
apache-2.0
| -289,952,064,989,498,560
| 42.471186
| 378
| 0.573456
| false
| 3.747516
| false
| false
| false
|
Stbot/PyCrypt
|
first writes/b64cy.py
|
1
|
2516
|
###############################################################################
import b64_mod
from binascii import unhexlify
###############################################################################
def decode(bstring,flag = 0):
"""Decode, when flag is set to 0 or not set takes a Base64 strig and converts it to english plain test. when flag is set to 1 the input string is already in hex format."""
declist = []
outstring = ''
if flag ==0:
declist = b64_mod.hextodec(b64_mod.basetohex(bstring))
elif flag == 1:
declist = b64_mod.hextodec(bstring)
for x in declist:
outstring += ""+chr(x)
return outstring
##############################################################################
def encode(ascstring, key=None):
"""Given an ascii english string of text with any quotes properly escaped this will encode it into a Base64 string."""
if key!=None:
if len(key)<len(ascstring):
key = keylen(b64_mod.ascitohex(ascstring),key)
outlist = []
for x in ascstring:
outlist.append(ord(x))
return b64_mod.hextobase(''.join(b64_mod.dec_to_hex(outlist)))
##############################################################################
def hexorsum(hexstring1, hexstring2):
"""Calculates the Xor sum of 2 equal length hex strings"""
binlist1 = []
binlist2 = []
binstring1 = b64_mod.hextobin(hexstring1)
binstring2 = b64_mod.hextobin(hexstring2)
for x in binstring1:
binlist1.append(x)
for x in binstring2:
binlist2.append(x)
sumlist = []
sumstring = ''
#print len(binlist1)
#print len(binlist2)
for x in range (len(binlist1)):
if binlist1[x] == binlist2[x]:
sumlist.append('0')
elif binlist1[x] != binlist2[x]:
sumlist.append('1')
sumstring = ''.join(sumlist)
return b64_mod.bintohex(sumstring)
##############################################################################
def keylen(hexstring, key, flag =0):
if flag == 0:
key = b64_mod.ascitohex(key)
while len(hexstring) != len(key):
if len(key)>len(hexstring):
key = key[:len(key)-1]
if len(key)<len(hexstring):
key+=key
return key
##############################################################################
def repkeyxor_encoder(text, key):
text = b64_mod.ascitohex(text)
if len(key)<len(text):
key = keylen(text,key)
return hexorsum(text,key)
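##############################################################################
# Illustrative usage sketch, not part of the original module. The strings
# below are arbitrary example values; the exact output depends on the
# encoding tables implemented in b64_mod.
if __name__ == '__main__':
    message = "attack at dawn"
    encoded = encode(message)
    print(decode(encoded))                    # round trip back to plain text
    print(repkeyxor_encoder(message, "ICE"))  # repeating-key XOR, hex output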
|
mit
| -6,056,610,221,976,268,000
| 36
| 175
| 0.49841
| false
| 3.835366
| false
| false
| false
|
googleapis/python-api-core
|
tests/unit/test_client_info.py
|
1
|
2488
|
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.api_core import client_info
def test_constructor_defaults():
info = client_info.ClientInfo()
assert info.python_version is not None
assert info.grpc_version is not None
assert info.api_core_version is not None
assert info.gapic_version is None
assert info.client_library_version is None
assert info.rest_version is None
def test_constructor_options():
info = client_info.ClientInfo(
python_version="1",
grpc_version="2",
api_core_version="3",
gapic_version="4",
client_library_version="5",
user_agent="6",
rest_version="7",
)
assert info.python_version == "1"
assert info.grpc_version == "2"
assert info.api_core_version == "3"
assert info.gapic_version == "4"
assert info.client_library_version == "5"
assert info.user_agent == "6"
assert info.rest_version == "7"
def test_to_user_agent_minimal():
info = client_info.ClientInfo(
python_version="1", api_core_version="2", grpc_version=None
)
user_agent = info.to_user_agent()
assert user_agent == "gl-python/1 gax/2"
def test_to_user_agent_full():
info = client_info.ClientInfo(
python_version="1",
grpc_version="2",
api_core_version="3",
gapic_version="4",
client_library_version="5",
user_agent="app-name/1.0",
)
user_agent = info.to_user_agent()
assert user_agent == "app-name/1.0 gl-python/1 grpc/2 gax/3 gapic/4 gccl/5"
def test_to_user_agent_rest():
info = client_info.ClientInfo(
python_version="1",
grpc_version=None,
rest_version="2",
api_core_version="3",
gapic_version="4",
client_library_version="5",
user_agent="app-name/1.0",
)
user_agent = info.to_user_agent()
assert user_agent == "app-name/1.0 gl-python/1 rest/2 gax/3 gapic/4 gccl/5"
|
apache-2.0
| -3,570,916,829,049,336,300
| 27.272727
| 79
| 0.64791
| false
| 3.450763
| false
| false
| false
|
ItsCalebJones/SpaceLaunchNow-Server
|
api/v330/spacestation/views.py
|
1
|
1860
|
from rest_framework.viewsets import ModelViewSet
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from api.models import *
from api.permission import HasGroupPermission
from api.v330.spacestation.serializers import SpaceStationDetailedSerializer, SpaceStationSerializer
class SpaceStationViewSet(ModelViewSet):
"""
API endpoint that allows Space Stations to be viewed.
GET:
Return a list of all the existing space stations.
FILTERS:
    Parameters - 'name', 'status', 'owners', 'orbit', 'type', 'owners__name', 'owners__abbrev'
Example - /api/3.3.0/spacestation/?status=Active
SEARCH EXAMPLE:
Example - /api/3.3.0/spacestation/?search=ISS
Searches through 'name', 'owners__name', 'owners__abbrev'
ORDERING:
Fields - 'id', 'status', 'type', 'founded', 'volume'
Example - /api/3.3.0/spacestation/?ordering=id
"""
def get_serializer_class(self):
mode = self.request.query_params.get("mode", "normal")
if self.action == 'retrieve' or mode == "detailed":
return SpaceStationDetailedSerializer
else:
return SpaceStationSerializer
queryset = SpaceStation.objects.all()
permission_classes = [HasGroupPermission]
permission_groups = {
'retrieve': ['_Public'], # retrieve can be accessed without credentials (GET 'site.com/api/foo/1')
        'list': ['_Public']  # list can also be accessed without credentials (GET 'site.com/api/foo')
}
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
filter_fields = ('name', 'status', 'owners', 'orbit', 'type', 'owners__name', 'owners__abbrev')
search_fields = ('$name', 'owners__name', 'owners__abbrev')
ordering_fields = ('id', 'status', 'type', 'founded', 'volume')
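# --------------------------------------------------------------------------
# Illustrative client sketch, not part of the original module. It combines
# the filter, search and ordering parameters documented in the class
# docstring; the base URL is a placeholder and the paginated 'results'
# layout of the response is an assumption.
def _example_spacestation_query(base_url="https://example.com"):
    import requests
    resp = requests.get(base_url + "/api/3.3.0/spacestation/",
                        params={"status": "Active", "search": "ISS", "ordering": "founded"})
    return [station["name"] for station in resp.json()["results"]]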
|
apache-2.0
| -6,008,000,186,509,618,000
| 39.456522
| 114
| 0.687634
| false
| 3.858921
| false
| false
| false
|
luan-th-nguyen/seisflows_ndt
|
seisflows/config.py
|
1
|
6784
|
import copy_reg
import imp
import os
import re
import sys
import types
from importlib import import_module
from os.path import abspath, join, exists
from seisflows.tools import msg
from seisflows.tools.err import ParameterError
from seisflows.tools import unix
from seisflows.tools.tools import loadjson, loadobj, loadpy, savejson, saveobj
from seisflows.tools.tools import module_exists, package_exists
# SeisFlows consists of interacting 'system', 'preprocess', 'solver', 'postprocess', 'optimize', and 'workflow' objects.
# Each corresponds simultaneously to a module in the SeisFlows source code, a class that is instantiated and made
# accessible via sys.modules, and a parameter in a global dictionary. Once in memory, these objects can be thought of
# as comprising the complete 'state' of a SeisFlows session
# The following list is one of the few hardwired aspects of the whole SeisFlows package. Any changes may result in circular imports or other problems
names = []
names += ['system']
names += ['preprocess']
names += ['solver']
names += ['postprocess']
names += ['optimize']
names += ['workflow']
def config():
""" Instantiates SeisFlows objects and makes them globally accessible by
registering them in sys.modules
"""
# parameters and paths must already be loaded
# (normally this is done by sfsubmit)
assert 'seisflows_parameters' in sys.modules
assert 'seisflows_paths' in sys.modules
# check if objects already exist on disk
if exists(_output()):
print msg.WarningOverwrite
sys.exit()
# instantiate and register objects
for name in names:
sys.modules['seisflows_'+name] = custom_import(name)()
# error checking
for name in names:
sys.modules['seisflows_'+name].check()
if not hasattr(sys.modules['seisflows_parameters'], 'workflow'.upper()):
print msg.MissingParameter_Worfklow
sys.exit(-1)
if not hasattr(sys.modules['seisflows_parameters'], 'system'.upper()):
print msg.MissingParameter_System
sys.exit(-1)
def save():
""" Exports session to disk
"""
unix.mkdir(_output())
for name in ['parameters', 'paths']:
fullfile = join(_output(), 'seisflows_'+name+'.json')
savejson(fullfile, sys.modules['seisflows_'+name].__dict__)
for name in names:
fullfile = join(_output(), 'seisflows_'+name+'.p')
saveobj(fullfile, sys.modules['seisflows_'+name])
def load(path):
""" Imports session from disk
"""
for name in ['parameters', 'paths']:
fullfile = join(_full(path), 'seisflows_'+name+'.json')
sys.modules['seisflows_'+name] = Dict(loadjson(fullfile))
for name in names:
fullfile = join(_full(path), 'seisflows_'+name+'.p')
sys.modules['seisflows_'+name] = loadobj(fullfile)
class Dict(object):
""" Dictionary-like object for holding parameters or paths
"""
def __iter__(self):
return iter(sorted(self.__dict__.keys()))
def __getattr__(self, key):
return self.__dict__[key]
def __getitem__(self, key):
return self.__dict__[key]
def __setattr__(self, key, val):
if key in self.__dict__:
raise TypeError("Once defined, parameters cannot be changed.")
self.__dict__[key] = val
def __delattr__(self, key):
if key in self.__dict__:
raise TypeError("Once defined, parameters cannot be deleted.")
raise KeyError
def update(self, newdict):
super(Dict, self).__setattr__('__dict__', newdict)
def __init__(self, newdict):
self.update(newdict)
class Null(object):
""" Always and reliably does nothing
"""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __nonzero__(self):
return False
def __getattr__(self, key):
return self
def __setattr__(self, key, val):
return self
def __delattr__(self, key):
return self
def custom_import(*args):
""" Imports SeisFlows module and extracts class of same name. For example,
custom_import('workflow', 'inversion')
imports 'seisflows.workflow.inversion' and, from this module, extracts
class 'inversion'.
"""
# parse input arguments
if len(args) == 0:
raise Exception(msg.ImportError1)
if args[0] not in names:
raise Exception(msg.ImportError2)
if len(args) == 1:
args += (_try(args[0]),)
if not args[1]:
return Null
# generate package list
packages = ['seisflows']
# does module exist?
_exists = False
for package in packages:
full_dotted_name = package+'.'+args[0]+'.'+args[1]
if module_exists(full_dotted_name):
_exists = True
break
if not _exists:
raise Exception(msg.ImportError3 %
(args[0], args[1], args[0].upper()))
# import module
module = import_module(full_dotted_name)
# extract class
if hasattr(module, args[1]):
return getattr(module, args[1])
else:
raise Exception(msg.ImportError4 %
(args[0], args[1], args[1]))
def tilde_expand(mydict):
""" Expands tilde character in path strings
"""
for key,val in mydict.items():
if type(val) not in [str, unicode]:
raise Exception
if val[0:2] == '~/':
mydict[key] = os.getenv('HOME') +'/'+ val[2:]
return mydict
# utility functions
def _par(key):
return sys.modules['seisflows_parameters'][key.upper()]
def _path(key):
return sys.modules['seisflows_paths'][key.upper()]
def _try(key):
try:
return _par(key)
except KeyError:
return None
def _output():
try:
return _full(_path('output'))
except:
return _full(join('.', 'output'))
def _full(path):
try:
return join(abspath(path), '')
except:
raise IOError
# the following code changes how instance methods are handled by pickle. placing it here, in this module, ensures that pickle changes will be in effect for all SeisFlows workflows
# for relevant discussion, see stackoverflow thread "Can't pickle <type 'instancemethod'> when using python's multiprocessing Pool.map()"
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
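# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the Dict container
# above freezes parameters once defined. The parameter names below are
# made-up example values.
if __name__ == '__main__':
    PAR = Dict({'WORKFLOW': 'inversion', 'NTASK': 4})
    print PAR.WORKFLOW                  # -> 'inversion'
    try:
        PAR.NTASK = 8                   # re-assignment is rejected
    except TypeError as err:
        print err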
|
bsd-2-clause
| -5,586,993,668,159,306,000
| 26.803279
| 406
| 0.627064
| false
| 3.876571
| false
| false
| false
|
ChristophKirst/ClearMap
|
ClearMap/IO/NRRD.py
|
1
|
20940
|
#!/usr/bin/env python
# encoding: utf-8
"""
Interface to NRRD volumetric image data files.
The interface is based on nrrd.py, an all-python (and numpy)
implementation for reading and writing nrrd files.
See http://teem.sourceforge.net/nrrd/format.html for the specification.
Example:
>>> import os, numpy
>>> import ClearMap.Settings as settings
>>> import ClearMap.IO.NRRD as nrrd
>>> filename = os.path.join(settings.ClearMapPath, 'Test/Data/Nrrd/test.nrrd');
>>> data = nrrd.readData(filename);
>>> print data.shape
(20, 50, 10)
Author
""""""
Copyright 2011 Maarten Everts and David Hammond.
Modified to integrate into ClearMap framework by Christoph Kirst, The Rockefeller University, New York City, 2015
"""
import numpy as np
import gzip
import bz2
import os.path
from datetime import datetime
import ClearMap.IO as io
class NrrdError(Exception):
"""Exceptions for Nrrd class."""
pass
#This will help prevent loss of precision
#IEEE754-1985 standard says that 17 decimal digits is enough in all cases.
def _convert_to_reproducible_floatingpoint( x ):
if type(x) == float:
value = '{:.16f}'.format(x).rstrip('0').rstrip('.') # Remove trailing zeros, and dot if at end
else:
value = str(x)
return value
_TYPEMAP_NRRD2NUMPY = {
'signed char': 'i1',
'int8': 'i1',
'int8_t': 'i1',
'uchar': 'u1',
'unsigned char': 'u1',
'uint8': 'u1',
'uint8_t': 'u1',
'short': 'i2',
'short int': 'i2',
'signed short': 'i2',
'signed short int': 'i2',
'int16': 'i2',
'int16_t': 'i2',
'ushort': 'u2',
'unsigned short': 'u2',
'unsigned short int': 'u2',
'uint16': 'u2',
'uint16_t': 'u2',
'int': 'i4',
'signed int': 'i4',
'int32': 'i4',
'int32_t': 'i4',
'uint': 'u4',
'unsigned int': 'u4',
'uint32': 'u4',
'uint32_t': 'u4',
'longlong': 'i8',
'long long': 'i8',
'long long int': 'i8',
'signed long long': 'i8',
'signed long long int': 'i8',
'int64': 'i8',
'int64_t': 'i8',
'ulonglong': 'u8',
'unsigned long long': 'u8',
'unsigned long long int': 'u8',
'uint64': 'u8',
'uint64_t': 'u8',
'float': 'f4',
'double': 'f8',
'block': 'V'
}
_TYPEMAP_NUMPY2NRRD = {
'i1': 'int8',
'u1': 'uint8',
'i2': 'int16',
'u2': 'uint16',
'i4': 'int32',
'u4': 'uint32',
'i8': 'int64',
'u8': 'uint64',
'f4': 'float',
'f8': 'double',
'V': 'block'
}
_NUMPY2NRRD_ENDIAN_MAP = {
'<': 'little',
'L': 'little',
'>': 'big',
'B': 'big'
}
def parse_nrrdvector(inp):
"""Parse a vector from a nrrd header, return a list."""
assert inp[0] == '(', "Vector should be enclosed by parenthesis."
assert inp[-1] == ')', "Vector should be enclosed by parenthesis."
return [_convert_to_reproducible_floatingpoint(x) for x in inp[1:-1].split(',')]
def parse_optional_nrrdvector(inp):
"""Parse a vector from a nrrd header that can also be none."""
if (inp == "none"):
return inp
else:
return parse_nrrdvector(inp)
_NRRD_FIELD_PARSERS = {
'dimension': int,
'type': str,
'sizes': lambda fieldValue: [int(x) for x in fieldValue.split()],
'endian': str,
'encoding': str,
'min': float,
'max': float,
'oldmin': float,
'old min': float,
'oldmax': float,
'old max': float,
'lineskip': int,
'line skip': int,
'byteskip': int,
'byte skip': int,
'content': str,
'sample units': str,
'datafile': str,
'data file': str,
'spacings': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'thicknesses': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axis mins': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axismins': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axis maxs': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axismaxs': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'centerings': lambda fieldValue: [str(x) for x in fieldValue.split()],
'labels': lambda fieldValue: [str(x) for x in fieldValue.split()],
'units': lambda fieldValue: [str(x) for x in fieldValue.split()],
'kinds': lambda fieldValue: [str(x) for x in fieldValue.split()],
'space': str,
'space dimension': int,
'space units': lambda fieldValue: [str(x) for x in fieldValue.split()],
'space origin': parse_nrrdvector,
'space directions': lambda fieldValue:
[parse_optional_nrrdvector(x) for x in fieldValue.split()],
'measurement frame': lambda fieldValue:
[parse_nrrdvector(x) for x in fieldValue.split()],
}
_NRRD_REQUIRED_FIELDS = ['dimension', 'type', 'encoding', 'sizes']
# The supported field values
_NRRD_FIELD_ORDER = [
'type',
'dimension',
'space dimension',
'space',
'sizes',
'space directions',
'kinds',
'endian',
'encoding',
'min',
'max',
'oldmin',
'old min',
'oldmax',
'old max',
'content',
'sample units',
'spacings',
'thicknesses',
'axis mins',
'axismins',
'axis maxs',
'axismaxs',
'centerings',
'labels',
'units',
'space units',
'space origin',
'measurement frame',
'data file']
def _determine_dtype(fields):
"""Determine the numpy dtype of the data."""
# Check whether the required fields are there
for field in _NRRD_REQUIRED_FIELDS:
if field not in fields:
raise NrrdError('Nrrd header misses required field: "%s".' % (field))
# Process the data type
np_typestring = _TYPEMAP_NRRD2NUMPY[fields['type']]
if np.dtype(np_typestring).itemsize > 1:
if 'endian' not in fields:
raise NrrdError('Nrrd header misses required field: "endian".')
if fields['endian'] == 'big':
np_typestring = '>' + np_typestring
elif fields['endian'] == 'little':
np_typestring = '<' + np_typestring
return np.dtype(np_typestring)
def _read_data(fields, filehandle, filename=None):
"""Read the actual data into a numpy structure."""
data = np.zeros(0)
# Determine the data type from the fields
dtype = _determine_dtype(fields)
# determine byte skip, line skip, and data file (there are two ways to write them)
lineskip = fields.get('lineskip', fields.get('line skip', 0))
byteskip = fields.get('byteskip', fields.get('byte skip', 0))
datafile = fields.get("datafile", fields.get("data file", None))
datafilehandle = filehandle
if datafile is not None:
# If the datafile path is absolute, don't muck with it. Otherwise
# treat the path as relative to the directory in which the detached
# header is in
if os.path.isabs(datafile):
datafilename = datafile
else:
datafilename = os.path.join(os.path.dirname(filename), datafile)
datafilehandle = open(datafilename,'rb')
numPixels=np.array(fields['sizes']).prod()
totalbytes = dtype.itemsize * numPixels
if fields['encoding'] == 'raw':
if byteskip == -1: # This is valid only with raw encoding
datafilehandle.seek(-totalbytes, 2)
else:
for _ in range(lineskip):
datafilehandle.readline()
datafilehandle.read(byteskip)
data = np.fromfile(datafilehandle, dtype)
elif fields['encoding'] == 'gzip' or\
fields['encoding'] == 'gz':
gzipfile = gzip.GzipFile(fileobj=datafilehandle)
# Again, unfortunately, np.fromfile does not support
# reading from a gzip stream, so we'll do it like this.
# I have no idea what the performance implications are.
data = np.fromstring(gzipfile.read(), dtype)
    elif fields['encoding'] == 'bzip2' or\
         fields['encoding'] == 'bz2':
        # bz2.BZ2File cannot wrap an already opened file object here, so
        # decompress the remaining bytes of the data stream directly.
        data = np.fromstring(bz2.decompress(datafilehandle.read()), dtype)
else:
raise NrrdError('Unsupported encoding: "%s"' % fields['encoding'])
if numPixels != data.size:
raise NrrdError('ERROR: {0}-{1}={2}'.format(numPixels,data.size,numPixels-data.size))
# dkh : eliminated need to reverse order of dimensions. nrrd's
# data layout is same as what numpy calls 'Fortran' order,
shape_tmp = list(fields['sizes'])
data = np.reshape(data, tuple(shape_tmp), order='F')
return data
def _validate_magic_line(line):
"""For NRRD files, the first four characters are always "NRRD", and
remaining characters give information about the file format version
"""
if not line.startswith('NRRD'):
raise NrrdError('Missing magic "NRRD" word. Is this an NRRD file?')
try:
if int(line[4:]) > 5:
raise NrrdError('NRRD file version too new for this library.')
except:
raise NrrdError('Invalid NRRD magic line: %s' % (line,))
return len(line)
def readHeader(filename):
"""Parse the fields in the nrrd header
nrrdfile can be any object which supports the iterator protocol and
returns a string each time its next() method is called — file objects and
    list objects are both suitable. If nrrdfile is a file object, it must be
    opened with the 'b' flag on platforms where that makes a difference
(e.g. Windows)
>>> readHeader(("NRRD0005", "type: float", "dimension: 3"))
{'type': 'float', 'dimension': 3, 'keyvaluepairs': {}}
>>> readHeader(("NRRD0005", "my extra info:=my : colon-separated : values"))
{'keyvaluepairs': {'my extra info': 'my : colon-separated : values'}}
"""
if isinstance(filename, basestring):
nrrdfile = open(filename,'rb');
else:
nrrdfile = filename;
# Collect number of bytes in the file header (for seeking below)
headerSize = 0
it = iter(nrrdfile)
headerSize += _validate_magic_line(next(it).decode('ascii'))
header = { 'keyvaluepairs': {} }
for raw_line in it:
headerSize += len(raw_line)
raw_line = raw_line.decode('ascii')
# Trailing whitespace ignored per the NRRD spec
line = raw_line.rstrip()
# Comments start with '#', no leading whitespace allowed
if line.startswith('#'):
continue
# Single blank line separates the header from the data
if line == '':
break
# Handle the <key>:=<value> lines first since <value> may contain a
# ': ' which messes up the <field>: <desc> parsing
key_value = line.split(':=', 1)
        if len(key_value) == 2:
key, value = key_value
# TODO: escape \\ and \n ??
# value.replace(r'\\\\', r'\\').replace(r'\n', '\n')
header['keyvaluepairs'][key] = value
continue
# Handle the "<field>: <desc>" lines.
field_desc = line.split(': ', 1)
        if len(field_desc) == 2:
field, desc = field_desc
            ## preceding and trailing white space should be ignored.
field = field.rstrip().lstrip()
desc = desc.rstrip().lstrip()
if field not in _NRRD_FIELD_PARSERS:
raise NrrdError('Unexpected field in nrrd header: "%s".' % field)
if field in header.keys():
raise NrrdError('Duplicate header field: "%s"' % field)
header[field] = _NRRD_FIELD_PARSERS[field](desc)
continue
# Should not reach here
raise NrrdError('Invalid header line: "%s"' % line)
# line reading was buffered; correct file pointer to just behind header:
nrrdfile.seek(headerSize)
return header
def readData(filename, **args):
"""Read nrrd file image data
Arguments:
filename (str): file name as regular expression
x,y,z (tuple): data range specifications
Returns:
array: image data
"""
with open(filename,'rb') as filehandle:
header = readHeader(filehandle)
#print header
data = _read_data(header, filehandle, filename)
#return (data, header)
#return data.transpose([1,0,2]);
data = io.readData(data, **args);
return data;
def _format_nrrd_list(fieldValue) :
return ' '.join([_convert_to_reproducible_floatingpoint(x) for x in fieldValue])
def _format_nrrdvector(v) :
return '(' + ','.join([_convert_to_reproducible_floatingpoint(x) for x in v]) + ')'
def _format_optional_nrrdvector(v):
if (v == 'none') :
return 'none'
else :
return _format_nrrdvector(v)
_NRRD_FIELD_FORMATTERS = {
'dimension': str,
'type': str,
'sizes': _format_nrrd_list,
'endian': str,
'encoding': str,
'min': str,
'max': str,
'oldmin': str,
'old min': str,
'oldmax': str,
'old max': str,
'lineskip': str,
'line skip': str,
'byteskip': str,
'byte skip': str,
'content': str,
'sample units': str,
'datafile': str,
'data file': str,
'spacings': _format_nrrd_list,
'thicknesses': _format_nrrd_list,
'axis mins': _format_nrrd_list,
'axismins': _format_nrrd_list,
'axis maxs': _format_nrrd_list,
'axismaxs': _format_nrrd_list,
'centerings': _format_nrrd_list,
'labels': _format_nrrd_list,
'units': _format_nrrd_list,
'kinds': _format_nrrd_list,
'space': str,
'space dimension': str,
'space units': _format_nrrd_list,
'space origin': _format_nrrdvector,
'space directions': lambda fieldValue: ' '.join([_format_optional_nrrdvector(x) for x in fieldValue]),
'measurement frame': lambda fieldValue: ' '.join([_format_optional_nrrdvector(x) for x in fieldValue]),
}
def _write_data(data, filehandle, options):
# Now write data directly
#rawdata = data.transpose([2,0,1]).tostring(order = 'C')
rawdata = data.transpose([2,1,0]).tostring(order = 'C');
if options['encoding'] == 'raw':
filehandle.write(rawdata)
elif options['encoding'] == 'gzip':
gzfileobj = gzip.GzipFile(fileobj = filehandle)
gzfileobj.write(rawdata)
gzfileobj.close()
    elif options['encoding'] == 'bz2':
        # bz2.BZ2File cannot wrap an already opened file object here;
        # compress in memory and write to the open file handle instead.
        filehandle.write(bz2.compress(rawdata))
else:
raise NrrdError('Unsupported encoding: "%s"' % options['encoding'])
def writeData(filename, data, options={}, separateHeader=False, x = all, y = all, z = all):
"""Write data to nrrd file
Arguments:
filename (str): file name as regular expression
data (array): image data
options (dict): options dictionary
separateHeader (bool): write a separate header file
Returns:
str: nrrd output file name
To sample data use `options['spacings'] = [s1, s2, s3]` for
3d data with sampling deltas `s1`, `s2`, and `s3` in each dimension.
"""
data = io.dataToRange(data, x = x, y = y, z = z);
# Infer a number of fields from the ndarray and ignore values
# in the options dictionary.
options['type'] = _TYPEMAP_NUMPY2NRRD[data.dtype.str[1:]]
if data.dtype.itemsize > 1:
options['endian'] = _NUMPY2NRRD_ENDIAN_MAP[data.dtype.str[:1]]
# if 'space' is specified 'space dimension' can not. See http://teem.sourceforge.net/nrrd/format.html#space
if 'space' in options.keys() and 'space dimension' in options.keys():
del options['space dimension']
options['dimension'] = data.ndim
dsize = list(data.shape);
#dsize[0:2] = [dsize[1], dsize[0]];
options['sizes'] = dsize;
# The default encoding is 'gzip'
if 'encoding' not in options:
options['encoding'] = 'gzip'
# A bit of magic in handling options here.
# If *.nhdr filename provided, this overrides `separate_header=False`
# If *.nrrd filename provided AND separate_header=True, separate files
# written.
# For all other cases, header & data written to same file.
if filename[-5:] == '.nhdr':
separate_header = True
if 'data file' not in options:
datafilename = filename[:-4] + str('raw')
if options['encoding'] == 'gzip':
datafilename += '.gz'
options['data file'] = datafilename
else:
datafilename = options['data file']
elif filename[-5:] == '.nrrd' and separate_header:
separate_header = True
datafilename = filename
filename = filename[:-4] + str('nhdr')
else:
# Write header & data as one file
datafilename = filename;
separate_header = False;
with open(filename,'wb') as filehandle:
filehandle.write(b'NRRD0005\n')
filehandle.write(b'# This NRRD file was generated by pynrrd\n')
filehandle.write(b'# on ' +
datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S').encode('ascii') +
b'(GMT).\n')
filehandle.write(b'# Complete NRRD file format specification at:\n');
filehandle.write(b'# http://teem.sourceforge.net/nrrd/format.html\n');
# Write the fields in order, this ignores fields not in _NRRD_FIELD_ORDER
for field in _NRRD_FIELD_ORDER:
if field in options:
outline = (field + ': ' +
_NRRD_FIELD_FORMATTERS[field](options[field]) +
'\n').encode('ascii')
filehandle.write(outline)
d = options.get('keyvaluepairs', {})
for (k,v) in sorted(d.items(), key=lambda t: t[0]):
outline = (str(k) + ':=' + str(v) + '\n').encode('ascii')
filehandle.write(outline)
# Write the closing extra newline
filehandle.write(b'\n')
# If a single file desired, write data
if not separate_header:
_write_data(data, filehandle, options)
# If separate header desired, write data to different file
if separate_header:
with open(datafilename, 'wb') as datafilehandle:
_write_data(data, datafilehandle, options)
return filename;
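# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: as described in the
# writeData docstring, sampling information can be attached through the
# options dictionary. File name and spacing values are made-up examples.
def _example_write_with_spacings(data, filename='stack.nrrd'):
    return writeData(filename, data, options={'spacings': [1.0, 1.0, 5.0]})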
def dataSize(filename, **args):
"""Read data size from nrrd image
Arguments:
filename (str): file name as regular expression
x,y,z (tuple): data range specifications
Returns:
tuple: data size
"""
header = readHeader(filename);
dims = header['sizes'];
#dims[0:2] = [dims[1], dims[0]];
return io.dataSizeFromDataRange(dims, **args);
def dataZSize(filename, z = all, **args):
"""Read data z size from nrrd image
Arguments:
filename (str): file name as regular expression
z (tuple): z data range specification
Returns:
int: z data size
"""
header = readHeader(filename);
dims = header['sizes'];
if len(dims) > 2:
return io.toDataSize(dims[2], r = z);
else:
return None;
def copyData(source, sink):
"""Copy an nrrd file from source to sink
Arguments:
source (str): file name pattern of source
sink (str): file name pattern of sink
Returns:
str: file name of the copy
Notes:
Todo: dealt with nrdh header files!
"""
io.copyFile(source, sink);
def test():
"""Test NRRD IO module"""
import ClearMap.IO.NRRD as self
reload(self)
from ClearMap.Settings import ClearMapPath
import os
import numpy
"""Test NRRD module"""
basedir = ClearMapPath;
fn = os.path.join(basedir, 'Test/Data/Nrrd/test.nrrd')
data = numpy.random.rand(20,50,10);
data[5:15, 20:45, 2:9] = 0;
reload(self)
print "writing nrrd image to: " + fn;
self.writeData(fn, data);
ds = self.dataSize(fn);
print "dataSize: %s" % str(ds);
print "Loading raw image from: " + fn;
img = self.readData(fn);
print "Image size: " + str(img.shape)
diff = img - data;
print (diff.max(), diff.min())
#some uint type
print "writing raw image to: " + fn;
udata = data * 10;
udata = udata.astype('uint16');
self.writeData(fn, udata);
print "Loading raw image from: " + fn;
img = self.readData(fn);
print "Image size: " + str(img.shape)
diff = img - udata;
print (diff.max(), diff.min())
#dataSize
print "dataSize is %s" % str(self.dataSize(fn))
print "dataZSize is %s" % str(self.dataZSize(fn))
if __name__ == "__main__":
test();
|
gpl-3.0
| -2,766,026,941,562,460,000
| 31.009174
| 117
| 0.596542
| false
| 3.508884
| false
| false
| false
|
mdevaev/slog
|
src/remote.py
|
1
|
1202
|
# -*- mode: python; coding: utf-8; -*-
import dbus
import dbus.service, dbus.mainloop.glib
class Remote:
def __init__(self):
bus = dbus.SessionBus()
slog_obj = bus.get_object("org.LightLang.SLog", "/SLog")
self.iface = dbus.Interface(slog_obj, "org.LightLang.SLogInterface")
def __spy_toggle(self):
self.iface.spy_toggle()
def __window_toggle(self):
self.iface.toggle()
def __show(self):
self.iface.show()
def execute(self, cmd):
if cmd == "toggle":
self.__window_toggle()
elif cmd == "spy-toggle":
self.__spy_toggle()
elif cmd == "show":
self.__show()
class SLogDBus(dbus.service.Object):
def __init__(self, interface, obj_path = "/SLog"):
self.interface = interface
bus = dbus.SessionBus()
bus_name = dbus.service.BusName("org.LightLang.SLog", bus)
dbus.service.Object.__init__(self, bus_name, obj_path)
@dbus.service.method("org.LightLang.SLogInterface")
def spy_toggle(self):
self.interface.spy_action.activate()
@dbus.service.method("org.LightLang.SLogInterface")
def toggle(self):
self.interface.window_toggle()
@dbus.service.method("org.LightLang.SLogInterface")
def show(self):
self.interface.hide()
self.interface.app_show()
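# --------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: Remote.execute
# dispatches the command names handled above to the running SLog instance on
# the session bus (which must already be registered).
if __name__ == "__main__":
    remote = Remote()
    remote.execute("show")        # raise the main window
    remote.execute("spy-toggle")  # toggle the clipboard spy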
|
gpl-2.0
| -6,164,688,821,272,691,000
| 24.041667
| 70
| 0.682196
| false
| 2.855107
| false
| false
| false
|
jabumaho/MNIST-neural-network
|
network.py
|
1
|
3424
|
import numpy as np
def sgm(x, derivative=False):
if not derivative:
return 1/(1+np.exp(-x))
else:
return sgm(x) * (1 - sgm(x))
def linear(x, derivative=False):
if not derivative:
return x
else:
return 1
class NeuralNetwork:
layerCount = 0
shape = None
weights = []
layerTransferFunc = []
def __init__(self, layerSize, layerTransferFunc=None):
self.layerCount = len(layerSize) - 1
self.shape = layerSize
self._layerInput = []
self._layerOutput = []
self._previousWeightDelta = []
for (l1, l2) in zip(layerSize[:-1], layerSize[1:]):
self.weights.append(np.random.normal(scale=0.1, size=(l2, l1 + 1)))
self._previousWeightDelta.append(np.zeros(shape=(l2, l1 + 1)))
if layerTransferFunc is None:
layerTransferFunc = []
for i in range(self.layerCount):
if i == self.layerCount - 1:
layerTransferFunc.append(sgm)
else:
layerTransferFunc.append(sgm)
else:
if len(layerTransferFunc) != len(layerSize):
raise ValueError("Incompatible no of transfer functions.")
elif layerTransferFunc[0] is not None:
raise ValueError("no transfer functions for input layer.")
else:
layerTransferFunc = layerTransferFunc[1:]
self.layerTransferFunc = layerTransferFunc
def run(self, inputr):
lnCases = inputr.shape[0]
self._layerInput = []
self._layerOutput = []
for i in range(self.layerCount):
if i == 0:
layerInput = self.weights[0].dot(np.vstack([inputr.T, np.ones([1, lnCases])]))
else:
layerInput = self.weights[i].dot(np.vstack([self._layerOutput[-1], np.ones([1, lnCases])]))
self._layerInput.append(layerInput)
self._layerOutput.append(self.layerTransferFunc[i](layerInput))
return self._layerOutput[-1].T
def trainEpoch(self, inputt, target, trainingRate=0.5, momentum=0.5):
delta = []
lnCases = inputt.shape[0]
self.run(inputt)
for i in reversed(range(self.layerCount)):
if i == self.layerCount - 1:
output_delta = self._layerOutput[i] - target.T
error = 0.5 * np.sum(output_delta**2)
delta.append(output_delta * self.layerTransferFunc[i](self._layerInput[i], True))
else:
deltaPullback = self.weights[i + 1].T.dot(delta[-1])
delta.append(deltaPullback[:-1, :] * self.layerTransferFunc[i](self._layerInput[i], True))
for i in range(self.layerCount):
deltaIndex = self.layerCount - 1 - i
if i == 0:
layerOutput = np.vstack([inputt.T, np.ones([1, lnCases])])
else:
layerOutput = np.vstack([self._layerOutput[i - 1], np.ones([1, self._layerOutput[i - 1].shape[1]])])
currentweightDelta = np.sum(layerOutput[None, :, :].transpose(2, 0, 1) * delta[deltaIndex][None, :, :].transpose(2, 1, 0), axis=0)
weightDelta = trainingRate * currentweightDelta + momentum * self._previousWeightDelta[i]
self.weights[i] -= weightDelta
self._previousWeightDelta[i] = weightDelta
return error
def test_network(self, inputtest, target):
self.run(inputtest)
output_delta = self._layerOutput[self.layerCount - 1] - target.T
return 0.5 * np.sum(output_delta**2)
def nudge(self, scale):
for i in xrange(len(self.weights)):
for j in xrange(len(self.weights[i])):
for k in xrange(len(self.weights[i][j])):
w = self.weights[i][j][k]
w *= scale
u = np.random.normal(scale=abs(w))
self.weights[i][j][k] += u
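# --------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: a minimal
# training loop on the XOR problem with a made-up layer layout and learning
# parameters.
if __name__ == "__main__":
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
    y = np.array([[0], [1], [1], [0]], dtype=float)
    nn = NeuralNetwork((2, 5, 1))
    for epoch in range(5000):
        err = nn.trainEpoch(X, y, trainingRate=0.5, momentum=0.5)
    print("final training error: {0}".format(err))
    print(nn.run(X))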
|
gpl-3.0
| -1,273,426,215,257,752,800
| 28.035088
| 133
| 0.645736
| false
| 2.990393
| false
| false
| false
|
arulalant/mmDiagnosis
|
diagnosis1/extra/dirty/MULTIPLE PLOTS/por_landscape_2x3.py
|
1
|
2203
|
import cdms2
import cdutil
import numpy
import numpy.ma
import vcs
import os
import sys
import por_template_2x3_landscape as por_lanscape_2x3
x = por_lanscape_2x3.x
iso=x.createisofill('new1', 'ASD')
iso.levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
#([1, 5, 10, 15, 20, 25, 35, 45, 55, 60, 65, 70, 80])
#iso.levels=vcs.mkscale(0.,80.)
iso.fillareacolors = (246, 255, 252, 253, 254, 251, 140, 5, 171,
248, 249, 242, 239)
#iso.fillareacolors=vcs.getcolors(iso.levels)
iso.ext_1='y'
iso.ext_2='y'
iso.level_1=0
iso.level_2=1
hours=[24, 48, 72, 96, 120]
score_name= ['ts', 'pod', 'pofd', 'hr', 'far']
th_list=[0.1, 0.6, 1. , 3. , 5. , 7.]
file_name='/NCMRWF/Process_Files/T254/StatiScore/2010/Season/jjas/24/stati_spatial_distribution_score_24hr_jjas_2010_T254.nc'
f=cdms2.open(file_name)
for j in xrange(len(score_name)):
score_name_capital = score_name[j].upper()
for k in range(6):
score=TS=f(score_name[j], threshold = th_list[k])
title_plot='T254 D-01 %s %s THRESHOLD JJAS 2010' %(score_name_capital, str(th_list[k]))
if (k == 0):
x.plot(score, por_lanscape_2x3.leftOfTop_lscp, iso, title=title_plot, continents=1, bg=1)
elif (k == 1):
x.plot(score, por_lanscape_2x3.midOfTop_lscp, iso, title=title_plot, continents=1, bg=1)
elif (k == 2):
x.plot(score, por_lanscape_2x3.rightOfTop_lscp, iso, title=title_plot, continents=1, bg=1)
elif(k==3):
x.plot(score, por_lanscape_2x3.leftOfBot_lscp, iso, title=title_plot, continents=1, bg=1)
elif(k==4):
x.plot(score, por_lanscape_2x3.midOfBot_lscp, iso, title=title_plot, continents=1, bg=1)
elif(k==5):
x.plot(score, por_lanscape_2x3.rightOfBot_lscp, iso, title=title_plot, continents=1, bg=1)
else:
pass
out_f_name='/home/arulalan/Desktop/%s_2010_obs.png' %(score_name_capital)
x.png(out_f_name)
x.clear()
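# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: the elif chain above
# only maps the threshold index k to one of the six slots of the 2x3
# landscape template, so an equivalent lookup would be:
slot_templates = [por_lanscape_2x3.leftOfTop_lscp, por_lanscape_2x3.midOfTop_lscp,
                  por_lanscape_2x3.rightOfTop_lscp, por_lanscape_2x3.leftOfBot_lscp,
                  por_lanscape_2x3.midOfBot_lscp, por_lanscape_2x3.rightOfBot_lscp]
# x.plot(score, slot_templates[k], iso, title=title_plot, continents=1, bg=1)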
|
gpl-3.0
| -1,482,070,405,860,495,400
| 31.880597
| 125
| 0.552429
| false
| 2.641487
| false
| false
| false
|
cedricpradalier/vrep_ros_ws
|
src/ar_loc_base/src/ar_loc_base/rover_pf.py
|
1
|
4147
|
import roslib; roslib.load_manifest('ar_loc_base')
import rospy
from numpy import *
from numpy.linalg import pinv, inv
from math import pi, sin, cos
from geometry_msgs.msg import *
import tf
import bisect
import threading
from rover_kinematics import *
class RoverPF(RoverKinematics):
def __init__(self, initial_pose, initial_uncertainty):
RoverKinematics.__init__(self)
self.initial_uncertainty = initial_uncertainty
self.lock = threading.Lock()
self.X = mat(vstack(initial_pose))
# Initialisation of the particle cloud around the initial position
self.N = 500
self.particles = [self.X + self.drawNoise(initial_uncertainty) for i in range(0,self.N)]
self.pa_pub = rospy.Publisher("~particles",PoseArray,queue_size=1)
def getRotation(self, theta):
R = mat(zeros((2,2)))
R[0,0] = cos(theta); R[0,1] = -sin(theta)
R[1,0] = sin(theta); R[1,1] = cos(theta)
return R
# Draw a vector uniformly around [0,0,0], scaled by norm
def drawNoise(self, norm):
if type(norm)==list:
return mat(vstack(norm)*(2*random.rand(3,1)-vstack([1,1,1])))
else:
return mat(multiply(norm,((2*random.rand(3,1)-vstack([1,1,1])))))
def predict(self, motor_state, drive_cfg, encoder_precision):
self.lock.acquire()
# The first time, we need to initialise the state
if self.first_run:
self.motor_state.copy(motor_state)
self.first_run = False
self.lock.release()
return
# Prepare odometry matrices (check rover_odo.py for usage)
iW = self.prepare_inversion_matrix(drive_cfg)
S = self.prepare_displacement_matrix(self.motor_state,motor_state,drive_cfg)
self.motor_state.copy(motor_state)
# Apply the particle filter prediction step here
# TODO
# self.particles = ...
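        # One possible sketch (an assumption, not the original exercise solution): treat
        # dX = iW * S as the body-frame displacement [dx, dy, dtheta] and move each
        # particle with its own encoder-noise draw, e.g.:
        #   dX = iW * S
        #   for k in range(len(self.particles)):
        #       n = self.drawNoise([encoder_precision, encoder_precision, encoder_precision])
        #       theta = self.particles[k][2, 0]
        #       self.particles[k][0:2, 0] += self.getRotation(theta) * (dX[0:2, 0] + n[0:2, 0])
        #       self.particles[k][2, 0] += dX[2, 0] + n[2, 0]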
self.lock.release()
def update_ar(self, Z, L, Uncertainty):
self.lock.acquire()
print "Update: L="+str(L.T)
# Implement particle filter update using landmarks here
# Note: the function bisect.bisect_left could be useful to implement
# the resampling process efficiently
# TODO
# self.particles = ...
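        # One possible sketch (an assumption): weight every particle by how well the
        # landmark L, seen from that particle, matches the measurement Z, then
        # resample from the cumulative weights with bisect:
        #   w = [exp(-linalg.norm(Z - self.getRotation(-p[2, 0]) * (L - p[0:2, 0]))**2
        #            / (2 * Uncertainty**2)) for p in self.particles]
        #   cumw = cumsum(array(w) / sum(w))
        #   self.particles = [self.particles[bisect.bisect_left(cumw, random.rand())]
        #                     + self.drawNoise(0.01) for i in range(self.N)]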
self.lock.release()
def update_compass(self, angle, Uncertainty):
self.lock.acquire()
print "Update: C="+str(angle)
        # Implement particle filter update using the compass here
# Note: the function bisect.bisect_left could be useful to implement
# the resampling process efficiently
# TODO
# self.particles = ...
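        # One possible sketch (an assumption): reuse the resampling scheme above, weighting
        # by the wrapped angular error between each particle heading and the compass angle:
        #   err = [(angle - p[2, 0] + pi) % (2 * pi) - pi for p in self.particles]
        #   w = [exp(-e**2 / (2 * Uncertainty**2)) for e in err]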
self.lock.release()
def updateMean(self):
X = mat(zeros((3,1)))
for x in self.particles:
X += x
self.X = X / len(self.particles)
return self.X
def publish(self, pose_pub, target_frame, stamp):
# Only compute the mean for plotting
self.updateMean()
pose = PoseStamped()
pose.header.frame_id = target_frame
pose.header.stamp = stamp
pose.pose.position.x = self.X[0,0]
pose.pose.position.y = self.X[1,0]
pose.pose.position.z = 0.0
Q = tf.transformations.quaternion_from_euler(0, 0, self.X[2,0])
pose.pose.orientation.x = Q[0]
pose.pose.orientation.y = Q[1]
pose.pose.orientation.z = Q[2]
pose.pose.orientation.w = Q[3]
pose_pub.publish(pose)
pa = PoseArray()
pa.header = pose.header
for p in self.particles:
po = Pose()
po.position.x = p[0,0]
po.position.y = p[1,0]
q = tf.transformations.quaternion_from_euler(0, 0, p[2,0])
po.orientation = Quaternion(*q)
pa.poses.append(po)
self.pa_pub.publish(pa)
def broadcast(self,br, target_frame, stamp):
br.sendTransform((self.X[0,0], self.X[1,0], 0),
tf.transformations.quaternion_from_euler(0, 0, self.X[2,0]),
stamp, "/%s/ground"%self.name, target_frame)
|
bsd-3-clause
| 2,330,022,787,027,257,000
| 32.991803
| 96
| 0.592235
| false
| 3.499578
| false
| false
| false
|
nblago/utils
|
src/model/BBFit.py
|
1
|
66521
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 10:57:34 2018
Class that enables to fit a black body function to a set of magnitudes.
@author: nadiablago
@version: 0.22
"""
from __future__ import print_function
import matplotlib
from matplotlib import pylab as plt
import corner
from astropy import units as u
import astropy.constants as cnt
import os, sys
import numpy as np
import emcee
from scipy import stats
import extinction
from astropy.cosmology import FlatLambdaCDM
import warnings
#If PYSYN_CDBS is not defined, it adds the environment variable which points to the
#filter response files for the bands we are interested in.
if not 'PYSYN_CDBS' in os.environ.keys():
print ("Adding the Pysynphot environment:")
os.environ['PYSYN_CDBS'] = "/Users/USER/SOMEWHERE/pysynphot_files"
print ('PYSYN_CDBS environment variable set to: ', os.environ['PYSYN_CDBS'])
'''os.environ['PYSYN_CDBS'] = "/scratch/Software/pysynphot_files/cdbs/"
# Add the environment variable which points to the filter response files for the bands we are interested in.
if not 'PYSYN_CDBS' in os.environ.keys():
print("Adding the Pysynphot environment:")
os.environ['PYSYN_CDBS'] = "/scratch/Software/pysynphot_files/cdbs/"
print('PYSYN_CDBS environment variable set to: ', os.environ['PYSYN_CDBS'])'''
os.environ['PYSYN_CDBS'] = "/Users/nadiablago/Documents/Software/pysynphot_files/"
import pysynphot as ps
class BBFit:
def __init__(self):
'''
Constructor initializes all the parameters to
defaults.
'''
#Some predefined constants in the units we need them
self.c = cnt.c.to(u.cm/u.s).value #2.99792458e+10 #cm / s
self.h = cnt.h.to(u.erg * u.s).value #6.62607004e-27 #erg s
self.k_B = cnt.k_B.to(u.erg / u.K).value#1.38064852e-16 #erg / K
#Source parameters
self.av_host = 0
self.av_mw = 0
self.law = "Fitzpatrick"
self.law_mw = "Fitzpatrick"
#Black body models
self.initT1 = 10000 #K
self.initR1 = 1 # Rsun
self.initT2 = 3000 #K
self.initR2 = 1 # Rsun
self.z = None
self.distMpc = None #in Mpc
self.mjd = 0
#Power law models
self.alpha = 0.75
self.alphaerr1 = 0
self.alphaerr2 = 0
self.scale = 1
self.scaleerr1 = 0.1
self.scaleerr2 = 0.1
#Disk model (scale is already in the power law model)
#Stellar mass, radius, log accretion mass per year, outer radius of accretion disk
self.Mstar = 1
self.Mstarerr1 = 0.1
self.Mstarerr2 = 0.1
self.Rstar = 1
self.Rstarerr1 = 0.1
        self.Rstarerr2 = 0.1
self.logMacc = -8
self.logMaccerr1 = -9
self.logMaccerr2 = -9
self.R_out = 3
self.R_outerr1 = 1
self.R_outerr2 = 1
#Location for plots
self.plotdir = "../../data/plots"
#Location for fit results
self.resdir = "../../data/modelfits"
self.resfile = "fit_results.txt"
#MCMC parameters
self.method = 'ensemble' #or HA for Hastings
self.mhtune = True # tuning of the Metropolis-Hastings
self.niterations = 10000
self.burnin = 5000
self.threads = 10
self.nwalkers = 20
self.sampler = None
self.model = "BlackBody" #others are "BlackBody_Av" or "BlackBody2_Av", "PowerLaw", "PowerLaw_BlackBody"
#Input data parameters.
#The fitter will run either with magnitudes or with fluxes
self.mags = None
self.magerrs = None
self.bands = None
#Indicates whether the magnitude is in AB or Vega
self.photsys = None
self.wls = None
self.fluxes = None
self.fluxerrs = None
#Output
self.T = None
self.Terr1 = None
self.Terr2 = None
self.R = None
self.Rerr1 = None
self.Rerr2 = None
self.L = None
self.Lerr1 = None
self.Lerr2 = None
#Output for the secondary star
self.Tsec = None
self.Tsecerr1 = None
self.Tsecerr2 = None
self.Rsec = None
self.Rsecerr1 = None
self.Rsecerr2 = None
self.Lsec = None
self.Lsecerr1 = None
self.Lsecerr2 = None
self.cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
#Set the plotting characteristics
self._matplotlib_init()
self.banddic = {"Y": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/ctio_y_andicam.dat"),
"J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_j_002.fits"),
"H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_h_002.fits"),
"K": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_k_002.fits"),
"keck,J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.J.dat"),
"keck,H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.H.dat"),
"keck,Ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.Ks.dat"),
"keck,K": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.K.dat"),
"spitzer,3.6": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac1_3.6.dat"),
"spitzer,4.5": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac2_4.5.dat"),
"spitzer,5.8": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac3_5.8.dat"),
"spitzer,8.0": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac4_8.0.dat"),
"wise,w1": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W1.dat"),
"wise,w2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W2.dat"),
"wise,w3": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W3.dat"),
"wise,w4": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W4.dat"),
"swift,uvw2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvw2_uvot.dat"),
"swift,uvm2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvm2_uvot.dat"),
"swift,uvw1": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvw1_uvot.dat"),
"swift,u": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_u_uvot.dat"),
"swift,b": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_b_uvot.dat"),
"swift,v": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_v_uvot.dat"),
"paranal,Y": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Y.dat"),
"paranal,Z": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Z.dat"),
"paranal,J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.J.dat"),
"paranal,H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.H.dat"),
"paranal,Ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Ks.dat"),
"omegacam,u": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.u_SDSS.dat"),
"omegacam,g": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.g_SDSS.dat"),
"omegacam,r": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.r_SDSS.dat"),
"omegacam,i": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.i_SDSS.dat"),
"omegacam,z": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.z_SDSS.dat"),
"omegacam,Halpha": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.Halpha.dat"),
"nirc2,j": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.J.dat"),
"nirc2,h": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.H.dat"),
"nirc2,ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.Ks.dat")
}
def _matplotlib_init(self):
'''
Set up preferences on matplotlib plot appearance.
'''
matplotlib.rcParams['xtick.minor.size'] = 6
matplotlib.rcParams['xtick.major.size'] = 6
matplotlib.rcParams['ytick.major.size'] = 6
matplotlib.rcParams['xtick.minor.size'] = 4
matplotlib.rcParams['ytick.minor.size'] = 4
matplotlib.rcParams['lines.linewidth'] = 0.5
matplotlib.rcParams['axes.linewidth'] = 1.5
matplotlib.rcParams['font.size']= 14.0
matplotlib.rcParams['font.family']= 'sans-serif'
matplotlib.rcParams['xtick.major.width']= 2.
matplotlib.rcParams['ytick.major.width']= 2.
matplotlib.rcParams['ytick.direction']='in'
matplotlib.rcParams['xtick.direction']='in'
def _band2flux(self):
'''
Will transform the magnitude measurement into a flux measurement.
'''
wls = np.array([])
fluxes = np.array([])
fluxerr = np.array([])
#Create a black body spectrum with an arbitrary value
lam = np.linspace(100, 120000, 10000)
sp = ps.BlackBody(10000)
sp.convert('flam')
sp2 = self._model_2(lam, 10000, 1)
sp2 = sp2 * np.max(sp.flux) / np.max(sp2)
sp = ps.ArraySpectrum(lam, sp2)
for b, m, me, psys in zip(self.bands, self.mags, self.magerrs, self.photsys):
print ("Band,",b)
#Create the observation bandpass
try:
band = ps.ObsBandpass(b)
except ValueError:
#The band is not in the standard list
#We need to go to the dictionary to retrieve the transmission function.
band = ps.FileBandpass(self.banddic[b])
#band.waveunits.convert("angstrom")
#else:
# band.waveunits = ps.units.Angstrom
            #Obtain the effective (average) wavelength
effwave = band.avgwave()
#Correct for Milky Way extinction
m = m - extinction.fitzpatrick99(np.array([effwave]), a_v=self.av_mw, unit='aa')[0]
#Normalize the spectrum to the magnitude of the observation
sp_norm = sp.renorm(m, psys, band, force="extrap")
#Observe with the band
obs = ps.Observation(sp_norm, band)
#Get the flux
flux = obs.effstim('flam')
wls = np.append(wls, effwave)
fluxes = np.append(fluxes, flux)
#Compute the error bars
flux_high = flux * 10**(0.4*me)
flux_low = flux * 10**(-0.4*me)
fluxerr = np.append(fluxerr, np.average([flux - flux_low, flux_high-flux]))
return wls, fluxes, fluxerr
def _model(self, lam, p):
'''
Returns the flux for the single BlackBody model for the wavelength introduced.
lam is in A.
p = (T, R)
'''
lam = lam * u.Angstrom
T = p[0] * u.K
R = (p[1] * u.Rsun).to(u.cm)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Area = np.pi * (4 * np.pi * R**2)
flam = Area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
return flam.to(u.erg/u.s/u.Angstrom).value
def _model_2(self, lam, T, R):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
lam = lam * u.Angstrom
T = T * u.K
R = (R * u.Rsun).to(u.cm)
Area = np.pi * (4 * np.pi * R**2)
flam = Area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
return flam.to(u.erg/u.s/u.Angstrom).value
def _model_av_r(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
T = p[0] * u.K
R = (p[1] * u.Rsun).to(u.cm)
a_v = p[2]
if a_v < 0:
return lam * np.inf
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
lam = lam * u.Angstrom
area = np.pi * (4 * np.pi * R**2)
flam = area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
#Apply the reddening
flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
return flam
def _model_av_r_2(self, lam, T, R, a_v):
'''
Return units: erg s-1 A-1
'''
return self._model_av_r(lam, (T, R, a_v))
def _model2_av(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
T1 = p[0] * u.K
R1 = (p[1] * u.Rsun).to(u.cm)
a_v = p[2]
T2 = p[3] * u.K
R2 = (p[4] * u.Rsun).to(u.cm)
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
lam = lam * u.Angstrom
area1 = np.pi * (4 * np.pi * R1**2)
area2 = np.pi * (4 * np.pi * R2**2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = area1 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T1))-1)
flam2 = area2 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T2))-1)
flam = flam1 + flam2
#Apply the reddening
flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
return flam
def _model2_av_2(self, lam, T1, R1, a_v, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_av(lam, (T1, R1, a_v, T2, R2))
def _model2_av_r(self, lam, p):
'''
Return units: erg s-1 A-1
'''
T1 = p[0] #In K
R1 = p[1]*69570000000.0 #From Rsun to cm
a_v = p[2]
T2 = p[3]
R2 = p[4]*69570000000.0 #From Rsun to cm
lam = lam * 1e-8 #To cm
if a_v < 0:
return lam * np.inf
#We need an extra pi as it is integrated across all steradians
#The second factor is the surface of the black body
        #The third one is the Planck law
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = np.pi * (4 * np.pi * R1**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T1))-1)
flam2 = np.pi * (4 * np.pi * R2**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T2))-1)
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam*1e8, a_v, unit='aa'))
flam = (flam1 + flam2) * flux_red *1e-8 #to erg / s / A
#Apply the reddening and transform to erg /s/ A from cm
return flam
def _model2_av_r_2(self, lam, T1, R1, a_v, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_av_r(lam, (T1, R1, a_v, T2, R2))
def _model2_r(self, lam, p):
'''
Return units: erg s-1 A-1
'''
T1 = p[0] #In K
R1 = p[1]*69570000000.0 #From Rsun to cm
T2 = p[2]
R2 = p[3]*69570000000.0 #From Rsun to cm
lam = lam * 1e-8 #To cm
#We need an extra pi as it is integrated across all steradians
#The second factor is the surface of the black body
        #The third one is the Planck law
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = np.pi * (4 * np.pi * R1**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T1))-1)
flam2 = np.pi * (4 * np.pi * R2**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T2))-1)
flam = (flam1 + flam2)*1e-8 #to erg / s / A
return flam
def _model2_r_2(self, lam, T1, R1, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_r(lam, (T1, R1, T2, R2))
def _model_powerlaw(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
lam = lam * u.Angstrom
        w0 = 4000 #p[0] #Reference wavelength
alpha = p[0]
scale = p[1]
a_v = p[2]
f = ps.PowerLaw(w0, alpha)
f.convert('flam')
flam = np.interp(lam, f.wave, f.flux)
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
area = 10**scale
return area * flam * flux_red #.to(u.erg/u.s/u.Angstrom).value
def _model_powerlaw_2(self, lam, alpha, scale, a_v):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_powerlaw(lam, (alpha, scale, a_v))
def _model_powerlaw_bb(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
        w0 = 4000 #p[0] #Reference wavelength
alpha = p[0]
scale = p[1]
T_bb = p[2]
R_bb = p[3]
bb_flux = self._model_2(lam, T_bb, R_bb)
lam = lam * u.Angstrom
f = ps.PowerLaw(w0, alpha)
f.convert('flam')
flam = np.interp(lam, f.wave, f.flux)
area = 10**scale
return area * flam + bb_flux
def _model_powerlaw_bb_2(self, lam, alpha, scale, T_bb, R_bb):
'''
Return units: erg s-1 A-1
'''
return self._model_powerlaw_bb(lam, (alpha, scale, T_bb, R_bb))
def _model_accretion_disk_old2(self, lam, Mstar, Rstar, logMacc, scale, R_out):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_accretion_disk_old(lam, (Mstar, Rstar, logMacc, scale, R_out))
def _model_accretion_disk_old(self, lam, p):
'''
Equation 1 from Kenyon, Hartmann, Hewett 1988.
'''
Mstar = p[0]
Rstar = p[1]
Macc = p[2]
scale = p[3]
R_out = p[4]
if Mstar<0 or Macc<-12 or Rstar<0.001 or scale<0 or R_out < Rstar:
return np.ones(len(lam))*np.inf
Macc = 10**Macc
R = np.linspace(Rstar,R_out,20)
dR = R[1] - R[0]
F_r = (3 * cnt.G * Mstar * u.Msun * Macc * u.Msun/u.year / 8 / np.pi / (u.Rsun*Rstar)**3) * (Rstar/R)**3 * (1 - (Rstar/R)**0.5)
F_r = F_r.to(u.erg/u.cm**2/u.s)
T_r = ((F_r / cnt.sigma_sb)**0.25).to(u.K)
T_max = 13000 * u.K *(Mstar)**0.25 * (Macc / 1e-5)**0.25 * (Rstar)**-0.75
        #Create the disk model
#For each differential radii, we compute the black body spectra corresponding
# to the temperature at that radius, and scale it by the flux expected at that
# radius.
disk_model = []
for i, ri in enumerate(R):
if ri>Rstar and ri<=1.5*Rstar:
sp = ps.BlackBody(T_max.value)
#sp = ps.BlackBody(T_r[i].value)
else:
sp = ps.BlackBody(T_r[i].value)
sp.convert('flam')
tot_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
#Compute the total emitted flux for the spherical area.
#Adopt the outer radius as the
dist_flux_fac = np.pi * ((ri+dR)**2 - ri**2) * (u.Rsun.to(u.cm))**2
scaled_flux = sp.flux / tot_flux * F_r[i].value #* dist_flux_fac
disk_model.append(scaled_flux)
disk = np.array(disk_model)
disk = np.nansum(disk, axis=0)
sp = ps.ArraySpectrum(sp.wave, disk)
#int_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
int_flux = np.max(sp.flux)
#Normalize (recover) the integral flux from 1kpc
flux_norm= sp.flux #/int_flux
#sp_norm = ps.ArraySpectrum(sp.wave, flux_norm)
flux_norm = np.interp(lam, sp.wave, flux_norm)
#flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
return flux_norm #* scale #* flux_red
def _model_disk_T(self, R, Mstar, Rstar, logMacc):
F_r = (3 * cnt.G * Mstar * 10**float(logMacc) * (u.Msun**2/u.year)) \
/ (8 * np.pi * (u.Rsun*R)**3) \
* (1 - (Rstar/R)**0.5)
T_r = ((F_r / cnt.sigma_sb)**0.25).to(u.K)
#print (F_r, T_r)
mask = (R>=Rstar) * (R<=1.5*Rstar)
if np.count_nonzero(mask)>0:
T_max = 13000 * u.K *(Mstar)**0.25 * (10**float(logMacc) / 1e-5)**0.25 * (Rstar)**-0.75
T_r[mask] = T_max
#print (mask, "Tmax", T_max, np.count_nonzero(mask))
return T_r.value
def _model_accretion_disk2(self, lam, Mstar, Rstar, logMacc, R_out):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_accretion_disk(lam, (Mstar, Rstar, logMacc, R_out))
def _model_accretion_disk(self, lam, p):
Mstar = np.maximum(1e-6, p[0])
Rstar = np.maximum(1e-6, p[1])
logMacc = np.maximum(-12, np.minimum(-7, p[2]))
R_out = np.maximum(1e-6, p[3])
i = 45.0
#Deg to radians
i = np.deg2rad(i%360)
d = self.distMpc*(u.Mpc).to(u.cm)
R = np.linspace(Rstar, R_out, 30)*u.Rsun
nu = (cnt.c / (lam*u.Angstrom)).to(u.Hz)
T_r = self._model_disk_T(R.value, Mstar, Rstar, logMacc)
F_nu_arr = []
for ni in nu:
I_nu_r = R / (np.exp(cnt.h * ni/(cnt.k_B*T_r*u.K)) - 1)
I_flux = np.trapz(I_nu_r, R)
F_nu = (4 * np.pi * cnt.h * np.cos(i)*ni**3)/(cnt.c**2 * d**2) * I_flux
F_nu_arr.append(F_nu.to(u.erg/u.s/u.Hz).value)
F_nu_arr = np.array(F_nu_arr)
s = ps.ArraySpectrum(lam, F_nu_arr, fluxunits='fnu', waveunits='Angstrom')
s.convert('flam')
fluxFactor = 4*np.pi*d**2
return s.flux*fluxFactor
def _get_Qnu(self, a, lam, wavedusttype="silicate"):
'''
'''
from scipy import interpolate
x = np.array([0.001, 0.01, 0.1, 1]) #size
y = np.array([0.01, 0.06, 0.2, 7, 10 ]) #wavelength
#--> size
# | wave
# v
z = np.array([[0.02, 0.2, 0.85, 0.85],
[0.02, 0.7, 0.7, 0.7],
[0.001, 0.01, 0.7, 0.7],
[0.00007, 0.001, 0.01, 0.1],
[0.001, 0.01, 0.1, 1]])
f = interpolate.interp2d(x, y, z, kind='linear')
return f(a, lam)
def _get_knu(self, a, wave, rho=1, ):
'''
Returns the values for the dust mass absorption coefficient
for the Spitzer bands for the given grain size and wavelength.
k_nu = (3. / 4 * np.pi * rho * a**3)* (np.pi * a**2 * Q_nu(a))
'''
        k_nu = (3. / 4 * np.pi * rho * a**3)* (np.pi * a**2 * self._get_Qnu(a, wave))
return k_nu
def _model_dust(self, Md, Td, a):
'''
Using the dust modelling approach from Fox et. al. 2010.
The assumption is that the dust is optically thin and that there is only one size and
one dust composition.
The opactities are taken from their Figure 4 values.
F_nu = M_d B_nu (T_d )k_nu(a) / d**2
'''
Bnu = ps.BlackBody(Td)
Bnu.convert('fnu')
knu = self._get_knu(a, wave) * u.cm**2 / u.g
Fnu = Md * u.Msun * Bnu * knu / (self.distMpc * u.Mpc)**2
#likelihood function
def _like(self, p, xdat, ydat, errdat, debug=False):
'''
p: function parameters
args: carry anything we want to pass to our function (e.g. the data)
'''
if self.model == "BlackBody":
ymod = self._model(xdat, p)
elif self.model == "BlackBody_Av":
ymod = self._model_av_r(xdat, p)
elif self.model == "BlackBody2_Av":
ymod = self._model2_av_r(xdat, p)
elif self.model == "BlackBody2":
ymod = self._model2_r(xdat, p)
elif self.model == "PowerLaw":
ymod = self._model_powerlaw(xdat, p)
elif self.model == "PowerLaw_BlackBody":
ymod = self._model_powerlaw_bb(xdat, p)
elif self.model == "Disk":
ymod = self._model_accretion_disk(xdat, p)
else:
print ("Unknown model", self.model)
return np.nan
#Discard models which exceed the upper limits
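        # (A negative entry in errdat is used as an upper-limit flag; this convention is
        # inferred from the plotting code, where fluxerrs < 0 marks "uplims" points.)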
if (np.any(ymod[errdat<0] > ydat[errdat<0])):
prob = 1e-320
#Compute the likelihood with only valid datapoints.
else:
prob = stats.norm.pdf(ydat[errdat>0] , ymod[errdat>0] , errdat[errdat>0] )
# log probabilities
# we add tiny number to avoid NaNs
mylike = np.log(prob + 1e-320).sum()
return mylike
def _logposterior(self, p, xdat, ydat, errdat):
'''
Returns the posterior of the observations. In essence the likelihood and the prior:
#log(likelihood) + log(prior)
'''
lp = self._logprior(p)
if (not np.isinf(lp)):
lp= self._like(p, xdat, ydat, errdat) + lp
return lp
def _logprior(self, p):
'''
Returns the prior probability distribution for each model.
'''
if self.model == "BlackBody":
T1 = p[0]
R1 = p[1]
if T1 < 0 or R1 < 0:
return -np.inf
logp = stats.uniform.logpdf(T1, 10, 15000)
logp = logp + stats.uniform.logpdf(R1, 1, 50000)
if self.model =="BlackBody_Av":
T1 = p[0]
R1 = p[1]
av = p[2]
if T1 < 0 or R1 < 0 or av < 0:
return -np.inf
else:
logp = stats.uniform.logpdf(T1, 10, 15000)
logp = logp + stats.uniform.logpdf(R1, 10000, 120000)
logp = logp + stats.uniform.logpdf(av, 0, 3)
elif self.model == "BlackBody2":
T1 = p[0]
R1 = p[1]
T2 = p[2]
R2 = p[3]
if T1 < 0 or T2 > T1 or T2 < 0 or R1 < 0 or R2<0:
return - np.inf
else:
logp = stats.uniform.logpdf(T1, 100, 10000)
logp = logp + stats.uniform.logpdf(R1, 10, 12000)
logp = logp + stats.uniform.logpdf(T2, 10, 5000)
logp = logp + stats.uniform.logpdf(R2, 10, 12000)
elif self.model == "BlackBody2_Av":
T1 = p[0]
R1 = p[1]
av = p[2]
T2 = p[3]
R2 = p[4]
if T1 < 0 or T2 > T1 or T2 < 0 or av < 0 or av > 10:
return - np.inf
else:
logp = stats.uniform.logpdf(T1, 100, 1000)
logp = logp + stats.uniform.logpdf(R1, 10000, 120000)
logp = logp + stats.uniform.logpdf(av, 0, 3)
logp = logp + stats.uniform.logpdf(T2, 100, 1000)
logp = logp + stats.uniform.logpdf(R2, 10000, 120000)
elif self.model == "PowerLaw":
alpha = p[0]
scale = p[1]
av = p[2]
if av < 0:
logp = -np.inf
else:
logp = stats.uniform.logpdf(alpha, 0, 3)
logp = logp + stats.uniform.logpdf(scale, 0.1, 100)
logp = logp + stats.uniform.logpdf(av, 0, 3)
elif self.model == "PowerLaw_BlackBody":
alpha = p[0]
scale = p[1]
T1 = p[2]
R1 = p[3]
if R1 < 0 or T1 < 0 or alpha < 0:
logp = -np.inf
else:
logp = stats.uniform.logpdf(alpha, 0, 3)
logp = logp + stats.uniform.logpdf(scale, 0.1, 100)
logp = logp + stats.uniform.logpdf(T1, 500, 20000)
logp = logp + stats.uniform.logpdf(R1, 0, 500)
elif self.model == "Disk":
Mstar = p[0]
Rstar = p[1]
logMacc = p[2]
R_out = p[3]
if Rstar < 0 or Mstar < 0 or logMacc < -12 or R_out<0 or R_out < Rstar:
logp = -np.inf
else:
logp = stats.uniform.logpdf(Mstar, 0, 1.44)
logp = logp + stats.uniform.logpdf(Rstar, 0, 10)
logp = logp + stats.uniform.logpdf(logMacc, -12, 7)
logp = logp + stats.uniform.logpdf(R_out, 0, 50)
return logp
def _get_max_and_intervals(self, x):
'''
        Provided a chain of samples, finds the median value and returns the
        34th and 66th percentiles as an approximate 1 sigma interval.
'''
return np.percentile(x, 34), np.percentile(x, 50), np.percentile(x, 66)
#return percent1, maxp, percent2
def _area2rsun(self, A):
'''
Given the area of the black body in cm2 returns the radius for the object in solar radius.
'''
Aream2 = A * u.cm**2 # add units
Rad = np.sqrt(Aream2/(4*(np.pi)**2)).to(u.Rsun) #in Rsun
return Rad.value
def _fill_output(self):
'''
Computes the confidence intervals from the MCMC distribution.
        Transforms the temperature and radius into a black body luminosity.
'''
if self.model.startswith("BlackBody"):
T1, T, T2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
self.T = T
self.Terr1 = T - T1
self.Terr2 = T2 - T
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
self.L = self._get_bol_lum(T, R)
self.Lerr1 = self.L - self._get_bol_lum(T1, R1)
self.Lerr2 = self._get_bol_lum(T2, R2) - self.L
if self.model == "BlackBody_Av":
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
elif self.model == "BlackBody2_Av":
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
Tsec1, Tsec, Tsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
Rsec1, Rsec, Rsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,4])
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
self.Tsec = Tsec
self.Tsecerr1 = Tsec - Tsec1
self.Tsecerr2 = Tsec2 - Tsec
self.Rsec = Rsec
self.Rsecerr1 = Rsec - Rsec1
self.Rsecerr2 = Rsec2 - Rsec
elif self.model == "BlackBody2":
Tsec1, Tsec, Tsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
Rsec1, Rsec, Rsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.Tsec = Tsec
self.Tsecerr1 = Tsec - Tsec1
self.Tsecerr2 = Tsec2 - Tsec
self.Rsec = Rsec
self.Rsecerr1 = Rsec - Rsec1
self.Rsecerr2 = Rsec2 - Rsec
self.Lsec = self._get_bol_lum(Tsec, Rsec)
self.Lsecerr1 = self.Lsec - self._get_bol_lum(Tsec1, Rsec1)
self.Lsecerr2 = self._get_bol_lum(Tsec2, Rsec2) - self.Lsec
elif self.model=="PowerLaw":
alpha1, alpha, alpha2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
self.alpha = alpha
self.alphaerr1 = alpha - alpha1
self.alphaerr2 = alpha2 - alpha
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
elif self.model=="PowerLaw_BlackBody":
alpha1, alpha, alpha2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
scale1, scale, scale2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
T1, T, T2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.alpha = alpha
self.alphaerr1 = alpha - alpha1
self.alphaerr2 = alpha2 - alpha
self.scale = scale
self.scaleerr1 = scale - scale1
self.scaleerr2 = scale2 - scale
self.T = T
self.Terr1 = T - T1
self.Terr2 = T2 - T
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
elif self.model=="Disk":
Mstar1, Mstar, Mstar2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
Rstar1, Rstar, Rstar2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
logMacc1, logMacc, logMacc2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
R_out1, R_out, R_out2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
#scale1, scale, scale2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.Mstar = Mstar
self.Mstarerr1 = Mstar - Mstar1
self.Mstarerr2 = Mstar2 - Mstar
self.Rstar = Rstar
self.Rstarerr1 = Rstar - Rstar1
self.Rstarerr2 = Rstar2 - Rstar
self.logMacc = logMacc
self.logMaccerr1 = logMacc - logMacc1
self.logMaccerr2 = logMacc2 - logMacc
self.R_out = R_out
self.R_outerr1 = R_out - R_out1
self.R_outerr2 = R_out2 - R_out
def _save_output(self):
'''
Saves in a results file.
'''
exists = os.path.isfile(self.resfile)
with open(self.resfile, 'a') as outfile:
print ("Saving results to %s"%self.resfile)
if self.model == "BlackBody":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, self.L, self.Lerr1, self.Lerr2, self.av_mw))
elif self.model == "BlackBody_Av":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av Averr1 Averr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2, self.Av, self.Averr1, self.Averr2, self.av_mw))
elif self.model == "BlackBody2":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Tsec Tsecerr1 Tsecerr2 Rsec Rsecerr1 Rsecerr2 Lsec Lsecerr1 Lsecerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f \n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2,
self.Tsec, self.Tsecerr1, self.Tsecerr2, self.Rsec, self.Rsecerr1, self.Rsecerr2, \
self.Lsec, self.Lsecerr1, self.Lsecerr2, self.av_mw))
elif self.model == "BlackBody2_Av":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av Averr1 Averr2 Tsec Tsecerr1 Tsecerr2 Rsec Rsecerr1 Rsecerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2, self.Av, self.Averr1, self.Averr2,\
self.Tsec, self.Tsecerr1, self.Tsecerr2, self.Rsec, self.Rsecerr1, self.Rsecerr2, self.av_mw))
elif self.model == "PowerLaw":
if not exists:
outfile.write("mjd alpha alphaerr1 alphaerr2 scale scaleerr1 scaleerr2 Av Averr1 Averr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.alpha, self.alphaerr1, self.alphaerr2, self.scale, self.scaleerr1, self.scaleerr2, \
self.Av, self.Averr1, self.Averr2, self.av_mw))
elif self.model == "PowerLaw_BlackBody":
if not exists:
outfile.write("mjd alpha alphaerr1 alphaerr2 scale scaleerr1 scaleerr2 T Terr1 Terr2 R Rerr1 Rerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.alpha, self.alphaerr1, self.alphaerr2, self.scale, self.scaleerr1, self.scaleerr2, \
self.T, self.Terr1, self.Terr2, \
self.R, self.Rerr1, self.Rerr2, \
self.av_mw))
elif self.model == "Disk":
if not exists:
outfile.write("mjd M Merr1 Merr2 Rstar Rerr1 Rerr2 Macc Maccerr1 Maccerr2 R_out R_outerr1 R_outerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3e %.3e %.3e %.3f\n"%\
                          (self.mjd, self.Mstar, self.Mstarerr1, self.Mstarerr2, \
self.Rstar, self.Rstarerr1, self.Rstarerr2,\
self.logMacc, self.logMaccerr1, self.logMaccerr2,\
#self.scale, self.scaleerr1, self.scaleerr2, \
self.R_out, self.R_outerr1, self.R_outerr2,\
self.av_mw))
else:
print ("Unknown model! %s"%self.model)
def _get_bol_lum(self, T, R):
'''
T is in K
R in R_sun.
Gives the Lbol in Lsun
'''
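        # Sanity check: T ~ 5772 K with R = 1 Rsun returns approximately 1 Lsun.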
L = cnt.sigma_sb * (T * u.K)**4 * 4 * np.pi * (R*u.Rsun)**2
return (L.to(u.Lsun)).value
def _get_save_path(self, savefile, plot_name=""):
'''
Checks what savefile name has been given.
        If there is a value, then it just stores it in the plot directory provided.
        If there is no name, then it creates a filename with the suffix provided.
        It also checks if there is already a file named like that, and if that is the case,
it increases the suffix so that it has a higher number, avoiding collision.
'''
#If there is a given name to store the file, then we use that one
if (not savefile is None):
if os.path.dirname(savefile) == "":
name = os.path.join(self.plotdir, os.path.basename(savefile))
#If there is no name, then we will save the plots in the plot directory
#with an automatic name.
# This name will increase a count if the name exists already.
else:
i = 0
name = os.path.join(self.plotdir, "%s_%.1f_%d.pdf"%(plot_name, self.mjd, i))
while (os.path.isfile(name)):
i = i+1
name = os.path.join(self.plotdir, "%s_%.1f_%d.pdf"%(plot_name, self.mjd, i))
return name
def _initialize_parameters(self, plot=False):
'''
        Runs the least squares optimization routine to find the best initial parameters
to start the MCMC with.
'''
lam = np.linspace(np.min(self.wls)*0.9, np.max(self.wls)*1.1, 2000)
a_v_wls = extinction.fitzpatrick99(self.wls, a_v=self.av_mw, unit='aa')
reddening = 10**(0.4*a_v_wls)
if self.model == "BlackBody":
flux_ini = self._model_2(lam, self.initT1, self.initR1)
p0 = (self.initT1, self.initR1)
print ("Initial parameters given:", p0)
#Perform a LSQ fit
#params, covar = curve_fit(self._model_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model_2(lam, *params)
if plot:
plt.clf()
mask_lims = self.fluxerrs<0
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls[~mask_lims], self.fluxes[~mask_lims], yerr=self.fluxerrs[~mask_lims], marker="o", color="b", lw=0, label="Measurements")
plt.errorbar(self.wls[mask_lims], self.fluxes[mask_lims], yerr=self.fluxes[mask_lims]*0.2, fmt="o", color="b", uplims=True)
plt.xlabel("Wavelength [A]")
plt.ylabel("$F_{\\lambda}$ [erg/s/cm2/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
plt.yscale("log")
name = self._get_save_path(None, "fluxes_obs_bb")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody_Av":
flux_ini = self._model_av_r_2(lam, self.initT1, self.initR1, self.av_host)
p0 = (self.initT1, self.initR1, self.av_host)
print ("Initial ", p0)
#params, covar = curve_fit(self._model_av_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model_av_r_2(lam, *params)
if plot:
plt.clf()
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs_bb_av")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody2_Av":
flux_ini = self._model2_av_r_2(lam, self.initT1, self.initR1, self.av_host, self.initT2, self.initR2)
p0 = (self.initT1, self.initR1, self.av_host, self.initT2, self.initR2)
print ("Initial ", p0)
#params, covar = curve_fit(self._model2_av_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model2_av_r_2(lam, *params)
if plot:
plt.clf()
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody2":
flux_ini = self._model2_r_2(lam, self.initT1, self.initR1, self.initT2, self.initR2)
p0 = (self.initT1, self.initR1, self.initT2, self.initR2)
print ("Initial ", p0)
#params, covar = curve_fit(self._model2_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model2_r_2(lam, *params)
#flux_1 = self._model_2(lam, *params[0:2])
#flux_2 = self._model_2(lam, *params[2:])
if plot:
plt.clf()
plt.figure(figsize=(6,4))
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
#plt.plot(lam, flux_1, label="BB1")
#plt.plot(lam, flux_2, label="BB2")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.legend(loc="best", fontsize=10)
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.yscale("log")
name = self._get_save_path(None, "fluxes_obs_2bb")
plt.savefig(name, dpi=200)
elif self.model == "PowerLaw":
#params, covar = curve_fit(self._model_powerlaw_2, self.wls , self.fluxes, \
#p0=(self.alpha, self.initR1, self.av_host), sigma=self.fluxerrs, absolute_sigma=True, maxfev = 10000)
lam = np.linspace(3000, 25000, 2000)
fluxpw = self._model_powerlaw_2(lam, self.alpha, self.scale, self.av_host)
if plot:
plt.clf()
plt.plot(lam, fluxpw, label="Fit initial parameters")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs_powerlaw")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
elif self.model == "PowerLaw_BlackBody":
#params, covar = curve_fit(self._model_powerlaw_2, self.wls , self.fluxes, \
#p0=(self.alpha, self.initR1, self.av_host), sigma=self.fluxerrs, absolute_sigma=True, maxfev = 10000)
lam = np.linspace(3000, 25000, 2000)
fluxpw = self._model_powerlaw_bb_2(lam, self.alpha, self.scale, self.initT1, self.initR1)
if plot:
plt.clf()
plt.plot(lam, fluxpw, label="Fit initial parameters")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="MW ext. corr")
plt.errorbar(self.wls, self.fluxes/reddening, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes/reddening), 1.2*np.max(self.fluxes))
plt.legend(loc="best")
name = self._get_save_path(None, "fluxes_obs_powerlaw_bb")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
if self.model == 'Disk':
#params = (0.5, 0.2, 5e-9, 1, 2)
p0 = (self.Mstar, self.Rstar, self.logMacc, self.R_out)
#params, covar = curve_fit(self._model_accretion_disk2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#print ("LSQ fit: Mstar:", params[0], " Rstar", params[1], "logMacc ", \
# params[2], "R_out", params[3])
lam = np.linspace(3000, 25000, 2000)
#flux_disk = self._model_accretion_disk2(lam, params[0], params[1], params[2], params[3])
if plot:
plt.clf()
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
#plt.plot(lam, flux_disk, lw=3)
plt.xlabel("Wavelength [$\\mu$m]")
plt.ylabel("Flux [erg/cm$^2$/s]")
plt.ylim(np.nanmin(self.fluxes)*0.9, np.nanmax(self.fluxes)*1.2)
plt.legend()
name = self._get_save_path(None, "fluxes_obs_disk")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
def initialize(self, plot=False):
'''
Will transform the magnitudes to fluxes and use the distance to the object to
calculate the luminosity at each wavelength.
'''
if (not os.path.isdir(self.plotdir)):
os.makedirs(self.plotdir)
print ("Created plot directory %s"%self.plotdir)
#Directory where to store the results
if (not os.path.isdir(self.resdir)):
os.makedirs(self.resdir)
print ("Created result directory %s"%(self.resdir))
self.resfile = os.path.join(self.resdir, self.model + os.path.basename(self.resfile))
# generate the data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.wls, self.fluxes, self.fluxerrs = self._band2flux()
#Plot the raw fluxes before correcting them.
'''if (plot):
plt.figure(figsize=(8,6))
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0)
for i in range(len(self.wls)):
plt.text(self.wls[i], self.fluxes[i]*1.01, self.bands[i].split(",")[-1], alpha=.4)
name = self._get_save_path(None, "fluxes_observed")
plt.yscale("log")
plt.xlabel("Wavelength [A]")
plt.ylabel("log (Flux/[erg/cm2/s])")
plt.tight_layout()
plt.savefig(name, dpi=200)'''
if not self.distMpc is None and self.distMpc !=0:
print ("Using distance to the source of %.1e Mpc"%self.distMpc)
fluxFactor = (4*np.pi*((self.distMpc*u.Mpc).to(u.cm) )**2).value
elif (self.distMpc is None or self.distMpc==0 )and (not self.z is None and self.z != 0):
self.distMpc = self.cosmo.luminosity_distance(self.z)
#Compute the flux multiplication factor for the object if it is at distance distMpc
#We transform that to cm, as the flux is in erg cm-2 s-1
fluxFactor = (4*np.pi*(self.distMpc.to(u.cm) )**2).value
else: # self.distMpc is None and self.z is None:
#Here we do not use any multiplication flux factor
print ("Warning: no redshift or distance provided!")
fluxFactor = 1
self.fluxes = self.fluxes * fluxFactor
self.fluxerrs = self.fluxerrs * fluxFactor
self._initialize_parameters(plot)
def run(self):
'''
Runs the main MCMC process.
Retrieves the priors, the likelihood process and computes the posterior probability.
'''
xs = self.wls
ys = self.fluxes
errs = self.fluxerrs
if self.model == "BlackBody":
p0 = np.array([ self.initT1, self.initR1])
sigs = np.array([self.initT1*0.2, self.initR1*0.2])
elif self.model == "BlackBody_Av":
p0 = np.array([ self.initT1, self.initR1, self.av_host])
sigs = np.array([2000, 10, 0.5])
elif self.model == "BlackBody2":
p0 = np.array([ self.initT1, self.initR1, self.initT2, self.initR2])
sigs = np.array([self.initT1*0.2, self.initR1*0.2, self.initT2*0.2, self.initR2*0.2])
elif self.model == "BlackBody2_Av":
p0 = np.array([ self.initT1, self.initR1, self.av_host, self.initT2, self.initR2])
sigs = np.array([2000, 5, 1, 2000, 5])
elif self.model == "PowerLaw":
p0 = np.array([ self.alpha, self.scale, self.av_host])
sigs = np.array([2, 3, 2])
elif self.model == "PowerLaw_BlackBody":
p0 = np.array([ self.alpha, self.scale, self.initT1, self.initR1])
sigs = np.array([2, 3, 2000, 2])
elif self.model == "Disk":
p0 = np.array([ self.Mstar, self.Rstar, self.logMacc, self.R_out])
sigs = np.array([0.1, 0.01, 1, 0.1])
print ("Initialized with p0", p0, " and sigmas ", sigs)
else:
print ("-------------------CRITICAL ERROR!----------------------")
print ("-------------------UNKNOWN model! %s----------------------"%self.model)
print ("-------------------CRITICAL ERROR!----------------------")
sys.exit()
ndim = len(p0)
        # ensemble MCMC
p0s = emcee.utils.sample_ball(p0, sigs, self.nwalkers)
# initialize the ball of initial conditions
#Supports the threads=X argument for parallelization
sampler = emcee.EnsembleSampler(self.nwalkers, ndim, self._logposterior,\
args=(xs, ys, errs), threads=10)
pos, lnprob, state = sampler.run_mcmc(p0s, self.burnin)
print ("Burning phase finished")
sampler.reset()
pos, lnprob, state = sampler.run_mcmc(pos, self.niterations)
print ('Acceptance ratio', sampler.acceptance_fraction)
self.sampler = sampler
print ("MCMC main phase finished")
self._fill_output()
self._save_output()
def plot_corner_posteriors(self, savefile=None):
'''
Plots the corner plot of the MCMC results.
'''
if self.model == "BlackBody2":
labels=["T1", "R1", "T2", "R2"]
elif self.model.startswith("BlackBody"):
labels=["T1", "R1", "Av", "T2", "R2"]
elif self.model == "PowerLaw":
labels=["alpha", "scale", "Av"]
elif self.model == "PowerLaw_BlackBody":
labels = ["alpha", "scale", "T", "R"]
elif self.model == "Disk":
labels = ["Mstar", "Rstar", "logMacc", "R_out"]
ndim = len(self.sampler.flatchain[0,:])
chain = self.sampler
samples = chain.flatchain
samples = samples[:,0:ndim]
plt.figure(figsize=(8,8))
fig = corner.corner(samples, labels=labels[0:ndim], quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_kwargs={"fontsize": 12})
fig.suptitle("MJD: %.2f"%self.mjd)
name = self._get_save_path(savefile, "mcmc_posteriors")
plt.savefig(name)
plt.close("all")
plt.figure(figsize=(8,ndim*3))
for n in range(ndim):
plt.subplot(ndim,1,n+1)
chain = self.sampler.chain[:,:,n]
nwalk, nit = chain.shape
for i in np.arange(nwalk):
plt.plot(chain[i], lw=0.1)
plt.ylabel(labels[n])
plt.xlabel("Iteration")
name_walkers = self._get_save_path(savefile, "mcmc_walkers")
plt.tight_layout()
plt.savefig(name_walkers)
plt.close("all")
def plot_fit(self, lambdaFlambda=False):
'''
Plots the best fit model to the data.
'''
lam = np.linspace( np.min(self.wls) -1500 , np.max(self.wls) + 1500, 1000)
plt.clf()
plt.figure(figsize=(8,6))
mask_lims = self.fluxerrs<0
if lambdaFlambda:
factor_obs=self.wls
else:
factor_obs=np.ones_like(self.wls)
plt.errorbar(self.wls[~mask_lims], self.fluxes[~mask_lims]*factor_obs[~mask_lims], yerr=self.fluxerrs[~mask_lims]*factor_obs[~mask_lims], marker="o", color="b", lw=0, label="Measurements")
plt.errorbar(self.wls[mask_lims], self.fluxes[mask_lims]*factor_obs[mask_lims], yerr=self.fluxes[mask_lims]*0.2*factor_obs[mask_lims], fmt="o", color="b", uplims=True)
for i in range(len(self.wls)):
plt.text(self.wls[i], self.fluxes[i]*1.01*factor_obs[i], self.bands[i], alpha=.4, fontsize=8)
if self.model == "BlackBody":
fluxbb = self._model(lam, (self.T, self.R))
if lambdaFlambda:
factor = lam
else:
factor = np.ones_like(lam)
plt.plot(lam, fluxbb*factor, "k-", label="BB fit")
plt.title("T: %d K R:%d R$_{\odot}$ Lumiosity %.2e L$_{\odot}$"%(self.T, self.R, self.L))
elif self.model == "BlackBody_Av":
fluxbb = self._model(lam, (self.T, self.R))
fluxbb_red = self._model_av_r(lam, (self.T, self.R, self.Av))
plt.plot(lam, fluxbb, "k-", label="BB fit")
plt.plot(lam, fluxbb_red, "red", label="BB fit + reddening")
plt.title("T: %.1f K R:%.1f R$_{\odot}$ Lumiosity %.1e L$_{\odot}$ Av: %.2f"%(np.round(self.T,0), np.round(self.R,0), np.round(self.L,1), self.Av))
elif self.model == "BlackBody2_Av":
            fluxbb_red = self._model_av_r(lam, (self.T, self.R, self.Av))
            fluxbb_secondary_red = self._model_av_r(lam, (self.Tsec, self.Rsec, self.Av))
fluxbb_with_seconday = self._model2_av(lam, (self.T, self.R, self.Av, self.Tsec, self.Rsec))
plt.plot(lam, fluxbb_red, "k-", label="BB1 fit + reddening")
plt.plot(lam, fluxbb_secondary_red, "k--", label="BB2 fit + reddening")
plt.plot(lam, fluxbb_with_seconday, "green", label="BB1 + BB2")
plt.title("T: %.1f K R:%.1f R$_{\odot}$ Lumiosity %.1e L$_{\odot}$ Av: %.2f\n T2: %.1f R2: %.1f"%(self.T, \
self.R, self.L, self.Av, self.Tsec, self.Rsec))
elif self.model == "BlackBody2":
fluxbb_primary = self._model(lam, (self.T, self.R))
fluxbb_secondary = self._model(lam, (self.Tsec, self.Rsec))
fluxbb_with_seconday = self._model2_r(lam, (self.T, self.R, self.Tsec, self.Rsec))
plt.plot(lam, fluxbb_primary, "k-", label="BB1 fit")
plt.plot(lam, fluxbb_secondary, "k--", label="BB2 fit")
plt.plot(lam, fluxbb_with_seconday, "green", label="BB1 + BB2")
plt.title("T: %d K R:%d R$_{\odot}$ T2: %d R2: %d"%( self.T, \
self.R, self.Tsec, self.Rsec))
elif self.model == "PowerLaw":
flux = self._model_powerlaw(lam, (self.alpha, self.scale, self.Av))
plt.plot(lam, flux, "k-", label="PowerLaw + reddening")
plt.title("$\\alpha$: %.1f Av: %.2f"%(self.alpha, self.Av))
elif self.model == "PowerLaw_BlackBody":
flux = self._model_powerlaw_bb(lam, (self.alpha, self.scale, self.T, self.R))
flux_pw = self._model_powerlaw(lam, (self.alpha, self.scale, 0))
flux_bb = self._model(lam, (self.T, self.R))
plt.plot(lam, flux, "k-", label="PowerLaw + BlackBody")
plt.plot(lam, flux_pw, "b--", label="PowerLaw")
plt.plot(lam, flux_bb, "g:", label="BlackBody")
plt.title("$\\alpha$: %.1f scale: %.2e T: %.1f R:%.1f"%(self.alpha, self.scale, self.T, self.R))
elif self.model == "Disk":
fluxdisk = self._model_accretion_disk(lam, (self.Mstar, self.Rstar, self.logMacc, self.R_out))
plt.plot(lam, fluxdisk, "k-", label="Disk fit")
plt.title("M:%.3f M$_{\\odot}$ R:%.3f R$_{\odot}$ M$_{acc}$:%.2f R_out: %.2f"%(self.Mstar, self.Rstar, self.logMacc, self.R_out))
ymin, ymax = plt.ylim()
#plt.ylim(np.max([ymin, np.min(self.fluxes)*0.01]), ymax)
plt.xlabel("Wavelength [$\\AA$]")
if (lambdaFlambda):
plt.ylabel("$\\lambda F_{\\lambda}$ [erg/s]")
plt.ylim(ymin=np.min(self.fluxes*factor_obs) * 0.1)
else:
plt.ylabel("$F_{\\lambda}$ [erg/s/$\\AA$]")
plt.ylim(ymin=np.min(self.fluxes) * 0.1)
plt.yscale("log")
plt.legend()
name = self._get_save_path(None, "mcmc_best_fit_model")
plt.savefig(name)
plt.close("all")
def write_fit_params(self):
'''
Write the best fit parameters of the model to the standard output.
'''
if self.model.startswith("BlackBody"):
#Prints the best parameters
print ('''
Temperature: \t %.3f -%.3f +%.3f K
Radius: \t\t %.2e -%.2e +%.2e R$_{\odot}$
Luminosity: \t %.3e -%.3e +%.3e L$_{\odot}$'''%(\
self.T, self.Terr1, self.Terr2, \
self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2))
if self.model == "BlackBody_Av":
print (" Av: \t\t\t %.1f -%.1f +%.1f mag"%(self.Av, self.Averr1, self.Averr2))
if self.model == "BlackBody2":
print (" Temperature2: %.1f -%.1f +%.1f K"%(self.Tsec, self.Tsecerr1, self.Tsecerr2))
print (" Radius2: %.2e -%.2e +%.2e R$_{\odot}$"%(self.Rsec, self.Rsecerr1, self.Rsecerr2))
print (" Luminosity2 %.3e -%.3e +%.3e L$_{\odot}$"%(self.Lsec, self.Lsecerr1, self.Lsecerr2))
if self.model == "BlackBody2_Av":
print (" Av: %.1f -%.1f +%.1f mag"%(self.Av, self.Averr1, self.Averr2))
print (" Temperature2: %.1f -%.1f +%.1f K"%(self.Tsec, self.Tsecerr1, self.Tsecerr2))
print (" Radius2: %.1f -%.1f +%.1f R$_{\odot}$"%(self.Rsec, self.Rsecerr1, self.Rsecerr2))
if (self.model == "PowerLaw"):
print ('''
alpha: %.2f -%.2f +%.2f
Scale : %.2e -%.2e +%.2e
Av %.2f -%.2f +%.2f'''%(\
self.alpha, self.alphaerr1, self.alphaerr2, \
self.scale, self.scaleerr1, self.scaleerr2, \
self.Av, self.Averr1, self.Averr2))
if (self.model == "PowerLaw_BlackBody"):
print ('''
alpha: %.2f -%.2f +%.2f
Scale (R): %.2e -%.2e +%.2e
T %.2f -%.2f +%.2f
R %.2f -%.2f +%.2f '''%(\
self.alpha, self.alphaerr1, self.alphaerr2, \
self.scale, self.scaleerr1, self.scaleerr2,\
self.T, self.Terr1, self.Terr2,\
self.R, self.Rerr1, self.Rerr2 ))
if (self.model == "Disk"):
print ('''
Mstar: %.3f$_{-%.3f}^{+%.3f}$
Rstar (10^8 cm): %.3f -%.3f +%.3f
logMacc %.3f$_{-%.3f}^{+%.3f}$
R_out %.3f$_{-%.3f}^{+%.3f}$ '''%(\
self.Mstar, self.Mstarerr1, self.Mstarerr2, \
self.Rstar*(u.Rsun.to(u.cm))/1e8, self.Rstarerr1*(u.Rsun.to(u.cm))/1e8, self.Rstarerr2*(u.Rsun.to(u.cm))/1e8,\
self.logMacc, self.logMaccerr1, self.logMaccerr2,\
self.R_out, self.R_outerr1, self.R_outerr2 ))
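# Minimal usage sketch (illustration only; the bands, magnitudes, distance and MJD below
# are invented values, not taken from this file):
#
#   fit = BBFit()
#   fit.bands = np.array(["paranal,J", "paranal,H", "paranal,Ks"])
#   fit.mags = np.array([17.2, 16.8, 16.5])
#   fit.magerrs = np.array([0.05, 0.05, 0.08])
#   fit.photsys = np.array(["vegamag", "vegamag", "vegamag"])
#   fit.mjd = 58000.0
#   fit.distMpc = 40.0
#   fit.av_mw = 0.1
#   fit.model = "BlackBody"
#   fit.initialize(plot=False)
#   fit.run()
#   fit.plot_fit()
#   fit.write_fit_params()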
|
mit
| -9,118,717,144,658,973,000
| 39.41373
| 196
| 0.504743
| false
| 3.123932
| false
| false
| false
|
disco-framework/disco
|
priv/general/components/gui/gui.py
|
1
|
18070
|
#!/usr/bin/python
import sys
from PyQt4 import QtCore, QtGui
from ui_mainview import Ui_MainWindow
import json
from jsonreader import JsonReader
##################################################
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# status bar
self.labelProblemSpec = QtGui.QLabel()
self.labelProblemTime = QtGui.QLabel()
self.labelCurrentRound = QtGui.QLabel()
self.labelWorkerInput = QtGui.QLabel()
self.ui.statusbar.addWidget(self.labelProblemSpec, 1)
self.ui.statusbar.addWidget(self.labelProblemTime, 1)
self.ui.statusbar.addWidget(self.labelCurrentRound, 1)
self.ui.statusbar.addWidget(self.labelWorkerInput, 1)
# set menu shortcuts
self.ui.actionLoadGameState.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+O")))
self.ui.actionSaveGameState.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+S")))
self.ui.actionQuit.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+Q")))
self.ui.actionStartRound.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+R")))
self.ui.actionAddScores.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+A")))
self.ui.actionKillAllWorkers.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+K")))
self.DataCollector = JsonReader(self)
self.connect(self.DataCollector, QtCore.SIGNAL("received_data"), self.received)
self.connect(self.DataCollector, QtCore.SIGNAL("worker_updated"), self.update_worker)
self.connect(self.DataCollector, QtCore.SIGNAL("round_started"), self.start_round)
self.connect(self.DataCollector, QtCore.SIGNAL("round_ended"), self.end_round)
self.connect(self.DataCollector, QtCore.SIGNAL("worker_input_changed"), self.update_worker_input)
self.connect(self.DataCollector, QtCore.SIGNAL("problem_chosen"), self.choose_problem)
self.connect(self.DataCollector, QtCore.SIGNAL("all_data"), self.update_all)
self.connect(self.DataCollector, QtCore.SIGNAL("save_game_state_reply"), self.save_game_state_reply)
self.connect(self.DataCollector, QtCore.SIGNAL("load_game_state_reply"), self.load_game_state_reply)
self.DataCollector.start()
self.problemAnswerTime = 0
self.roundTimerRemaining = 0
self.roundTimer = QtCore.QTimer()
QtCore.QObject.connect(self.roundTimer, QtCore.SIGNAL("timeout()"), self.roundTimer_tick)
# file menu
QtCore.QObject.connect(self.ui.actionLoadGameState, QtCore.SIGNAL("triggered()"), self.btnLoadGameState_clicked)
QtCore.QObject.connect(self.ui.actionSaveGameState, QtCore.SIGNAL("triggered()"), self.btnSaveGameState_clicked)
QtCore.QObject.connect(self.ui.actionReloadAllData, QtCore.SIGNAL("triggered()"), self.btnReloadAllData_clicked)
QtCore.QObject.connect(self.ui.actionQuit, QtCore.SIGNAL("triggered()"), self.btnQuit_clicked)
# round menu
QtCore.QObject.connect(self.ui.actionStartRound, QtCore.SIGNAL("triggered()"), self.btnStartRound_clicked)
QtCore.QObject.connect(self.ui.actionAddScores, QtCore.SIGNAL("triggered()"), self.btnAddScores_clicked)
QtCore.QObject.connect(self.ui.actionKillAllWorkers, QtCore.SIGNAL("triggered()"), self.btnKillAllWorkers_clicked)
# worker tab
self.ui.tableWorker.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.ui.tableWorker.customContextMenuRequested.connect(self.tableWorker_requestContextMenu)
# io tab
QtCore.QObject.connect(self.ui.btnSend, QtCore.SIGNAL("clicked()"), self.btnSend_clicked)
QtCore.QObject.connect(self.ui.edtSend, QtCore.SIGNAL("returnPressed()"), self.btnSend_clicked)
# worker table header
thh = self.ui.tableWorker.horizontalHeader()
thh.setVisible(True)
thh.resizeSection(0, 50) # ranking group
thh.resizeSection(1, 60) # id
thh.resizeSection(2, 170) # name
thh.resizeSection(3, 230) # proposition
thh.resizeSection(4, 100) # points
thh.resizeSection(5, 50) # processed points
thh.resizeSection(6, 100) # problem points (accumulated over all rounds on this problem)
thh.setSortIndicator(1, QtCore.Qt.AscendingOrder)
tvh = self.ui.tableWorker.verticalHeader()
tvh.setVisible(True)
tvh.setResizeMode(QtGui.QHeaderView.Fixed)
self.reset_problem_list([])
self.worker_blocked = {}
def closeEvent(self, e):
self.send(json.dumps({'action': 'quit program'}))
self.DataCollector.terminate() # TODO: "This function is dangerous and its use is discouraged"
self.DataCollector.wait()
e.accept()
app.exit()
###############################
## main menu / buttons ##
###############################
## file menu
def btnLoadGameState_clicked(self):
fileName = str(QtGui.QFileDialog.getOpenFileName())
if fileName != "":
self.send(json.dumps({'action': 'load game state', 'file path': fileName}))
def btnSaveGameState_clicked(self):
fileName = str(QtGui.QFileDialog.getSaveFileName())
if fileName != "":
self.send(json.dumps({'action': 'save game state', 'file path': fileName}))
def btnReloadAllData_clicked(self):
self.send(json.dumps({'action': 'get all data'}))
def btnQuit_clicked(self):
self.close()
## problems menu
def btnChooseProblem_clicked(self, idx, action, oldChecked):
action.setChecked(oldChecked) # undo auto check
self.send(json.dumps({'action': 'choose problem', 'problem idx': idx}))
## round menu
def btnStartRound_clicked(self):
self.send(json.dumps({'action': 'start round'}))
def btnAddScores_clicked(self):
self.send(json.dumps({'action': 'add scores'}))
self.ui.actionAddScores.setEnabled(False)
def btnKillAllWorkers_clicked(self):
self.send(json.dumps({'action': 'kill all workers'}))
## worker tab
def tableWorker_requestContextMenu(self, position):
workerId = str(self.ui.tableWorker.item(self.ui.tableWorker.currentRow(), 1).text())
# create menu
menu = QtGui.QMenu()
actApply = menu.addAction("&Apply proposition")
actBlock = None
actUnblock = None
if self.worker_blocked[workerId]:
actUnblock = menu.addAction("Un&block worker '" + workerId + "'")
else:
actBlock = menu.addAction("&Block worker '" + workerId + "'")
# execute menu synchronously
action = menu.exec_(self.ui.tableWorker.viewport().mapToGlobal(position))
if action != None:
if action == actApply:
if QtGui.QMessageBox.information(self, "Apply proposition", "Really apply proposition from " + workerId + "?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
self.send(json.dumps({'action': 'apply proposition', 'worker id': workerId}))
elif action == actBlock:
self.send(json.dumps({'action': 'block worker', 'worker id': workerId}))
elif action == actUnblock:
self.send(json.dumps({'action': 'unblock worker', 'worker id': workerId}))
## io tab
def btnSend_clicked(self):
msg = self.ui.edtSend.text()
self.send(msg)
self.ui.edtSend.clear()
#######################
## Round timer ##
#######################
def roundTimer_tick(self):
self.roundTimerRemaining -= self.roundTimer.interval()
if self.roundTimerRemaining <= 0:
self.roundTimer.stop()
self.roundTimerRemaining = 0
self.labelProblemTime.setText("Answer time remaining\n " +
str(self.roundTimerRemaining/1000) + "s")
#######################
## JSON events ##
#######################
def update_worker(self, id, proposition, caption, score, processedScore, problemScore, blocked, working):
row = self.get_worker_table_row(id)
if proposition == None:
proposition = ""
if row != None:
self.update_worker_by_row(row, id, proposition, caption, score, processedScore, problemScore, blocked, working)
def start_round(self, round):
self.ui.actionStartRound.setEnabled(False)
self.ui.menuProblems.setEnabled(False)
self.ui.actionAddScores.setEnabled(False)
self.labelCurrentRound.setText("Round (running)\n " + str(round))
self.roundTimerRemaining = self.problemAnswerTime
self.roundTimer.start(100)
def end_round(self, round):
self.ui.actionStartRound.setEnabled(True)
self.ui.menuProblems.setEnabled(True)
self.ui.actionAddScores.setEnabled(True)
self.labelCurrentRound.setText("Round\n " + str(round))
self.roundTimerRemaining = 0
self.roundTimer_tick()
def update_worker_input(self, workerInput):
def format_wi_line(line): return shorten_string(28, line)
wiString = "\n".join(list(map(format_wi_line, workerInput)))
self.labelWorkerInput.setText("Worker input for next round:\n" + wiString)
def choose_problem(self, problemIdx):
self.roundTimer.stop()
self.reset_problem_list(self.problemList, problemIdx)
probDesc, probSpec, answerTime, startState = self.problemList[problemIdx]
self.labelProblemSpec.setText("Problem\n " + probDesc)
self.labelProblemTime.setText("Answer time\n " + str(answerTime/1000.0) + "s")
self.problemAnswerTime = answerTime
self.labelCurrentRound.setText("")
def update_all(self, running, workerList, problemList, problemIdx, round, workerInput, problemState):
self.clear_worker_table()
for id, name, group, proposition, caption, score, processedScore, problemScore, blocked, working in workerList:
self.add_worker(id, name, group, proposition, caption, score, processedScore, problemScore, blocked, working)
self.update_worker_input(workerInput)
if running:
self.start_round(round)
else:
self.end_round(round)
self.problemList = problemList
self.choose_problem(problemIdx)
def save_game_state_reply(self, result):
if result == "ok":
msg = "Game state successfully saved."
QtGui.QMessageBox.information(self, "Game state saved", msg, QtGui.QMessageBox.Ok)
else:
if result == "enoent" : msg = "No such file or directory!"
elif result == "enotdir": msg = "Not a directory!"
elif result == "enospc" : msg = "No space left on device!"
elif result == "eacces" : msg = "Permission denied!"
elif result == "eisdir" : msg = "Illegal operation on a directory!"
else : msg = "Unknown error: " + result
QtGui.QMessageBox.warning(self, "Error saving game state", msg, QtGui.QMessageBox.Ok)
def load_game_state_reply(self, result):
if result == "ok":
msg = "Game state successfully loaded."
QtGui.QMessageBox.information(self, "Game state loaded", msg, QtGui.QMessageBox.Ok)
else:
if result == "eformat": msg = "Invalid file format!"
elif result == "enoent" : msg = "No such file or directory!"
elif result == "enotdir": msg = "Not a directory!"
elif result == "eacces" : msg = "Permission denied!"
elif result == "eisdir" : msg = "Illegal operation on a directory!"
else : msg = "Unknown error: " + result
QtGui.QMessageBox.warning(self, "Error loading game state", msg, QtGui.QMessageBox.Ok)
#############################
## private functions ##
#############################
def send(self, msg):
self.ui.txtRecv.appendHtml("<span style='font-weight:bold;color:red'>send:</span> "
+ escape_html(msg).rstrip("\n").replace("\n","<br />"))
print(msg)
sys.stdout.flush()
def received(self, msg):
self.ui.txtRecv.appendHtml("<span style='font-weight:bold;color:blue'>recv:</span> "
+ escape_html(msg).rstrip("\n").replace("\n","<br />"))
def get_worker_table_row(self, id):
for row in range(0, self.ui.tableWorker.rowCount()):
if self.ui.tableWorker.item(row, 1).text() == id:
return row
return None
def clear_worker_table(self):
self.worker_blocked = {}
self.ui.tableWorker.clearContents()
self.ui.tableWorker.setRowCount(0)
def add_worker(self, id, name, group, proposition, propCaption, score, processedScore, problemScore, blocked, working):
if proposition == None:
proposition = ""
self.worker_blocked[id] = blocked != "no"
row = self.ui.tableWorker.rowCount()
self.ui.tableWorker.setRowCount(row + 1)
self.ui.tableWorker.setSortingEnabled(False)
item = QtGui.QTableWidgetItem()
item.setText(group)
self.ui.tableWorker.setItem(row, 0, item)
item = QtGui.QTableWidgetItem()
item.setText(id)
self.ui.tableWorker.setItem(row, 1, item)
item = QtGui.QTableWidgetItem()
item.setText(name)
self.ui.tableWorker.setItem(row, 2, item)
item = QtGui.QTableWidgetItem()
self.ui.tableWorker.setItem(row, 3, item)
item = CustomTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.ui.tableWorker.setItem(row, 4, item)
item = CustomTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.ui.tableWorker.setItem(row, 5, item)
item = CustomTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.ui.tableWorker.setItem(row, 6, item)
self.update_worker_by_row(row, id, proposition, propCaption, score, processedScore, problemScore, blocked, working)
self.ui.tableWorker.setSortingEnabled(True)
def update_worker_by_row(self, row, id, proposition, propCaption, score, processedScore, problemScore, blocked, working):
isBlocked = blocked != "no"
blockedIdx = blocked["idx"] if "idx" in blocked else 0
self.worker_blocked[id] = isBlocked
self.ui.tableWorker.setSortingEnabled(False)
brush = QtGui.QBrush(QtGui.QColor(190, 190, 190))
if self.worker_blocked[id]:
brush.setStyle(QtCore.Qt.SolidPattern)
else:
brush.setStyle(QtCore.Qt.NoBrush)
self.ui.tableWorker.item(row, 0).setBackground(brush)
self.ui.tableWorker.item(row, 1).setBackground(brush)
self.ui.tableWorker.item(row, 2).setBackground(brush)
item = self.ui.tableWorker.item(row, 3)
item.setText(propCaption)
item.setBackground(brush)
item = self.ui.tableWorker.item(row, 4)
item.setText(str(score))
item.setCustomSortData(isBlocked, {False: int(score), True: blockedIdx}[isBlocked])
item.setBackground(brush)
item = self.ui.tableWorker.item(row, 5)
item.setText(str(processedScore))
item.setCustomSortData(isBlocked, {False: int(processedScore), True: blockedIdx}[isBlocked])
item.setBackground(brush)
item = self.ui.tableWorker.item(row, 6)
item.setText(str(problemScore))
item.setCustomSortData(isBlocked, {False: int(problemScore), True: blockedIdx}[isBlocked])
item.setBackground(brush)
if self.ui.tableWorker.cellWidget(row, 2) == None:
if working:
self.ui.tableWorker.setCellWidget(row, 2, WorkingWidget(self))
else:
if not working:
self.ui.tableWorker.removeCellWidget(row, 2)
self.ui.tableWorker.setSortingEnabled(True)
def reset_problem_list(self, lst, checkedIdx=None):
self.problemList = lst
self.ui.menuProblems.clear()
if lst == []:
action = QtGui.QAction(self)
action.setText("--- no problems ---")
action.setEnabled(False)
self.ui.menuProblems.addAction(action)
else:
for idx, (description, spec, answerTime, state) in enumerate(lst):
action = QtGui.QAction(self)
action.setText(description + "\t" + str(answerTime/1000.0) + "s")
action.setCheckable(True)
if checkedIdx == idx:
action.setChecked(True)
QtCore.QObject.connect(action, QtCore.SIGNAL("triggered()"),
lambda i=idx, a=action, chk=(checkedIdx==idx):
self.btnChooseProblem_clicked(i, a, chk))
self.ui.menuProblems.addAction(action)
##################################################
class WorkingWidget(QtGui.QLabel):
def __init__(self, parent=None):
super(WorkingWidget, self).__init__(parent)
self.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
movie = QtGui.QMovie("./gears.gif")
self.setMovie(movie)
movie.start()
##################################################
class CustomTableWidgetItem(QtGui.QTableWidgetItem):
def __init__(self):
# call custom constructor with item type 'UserType'
QtGui.QTableWidgetItem.__init__(self, QtGui.QTableWidgetItem.UserType)
self.blocked = False
self.sortKey = 0
def setCustomSortData(self, blocked, sortKey):
self.blocked = blocked
self.sortKey = sortKey
# override the 'less than' operator
def __lt__(self, other):
if self.blocked == other.blocked:
return self.sortKey > other.sortKey
else:
return self.blocked < other.blocked
##################################################
def shorten_string(chars, string):
return (string[:(chars-3)] + '...') if len(string) > chars else string
def escape_html(str):
return str.replace("&","&").replace(">",">").replace("<","<")
##################################################
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
win = MainWindow()
win.show()
sys.exit(app.exec_())
|
apache-2.0
| -6,686,924,166,606,951,000
| 38.714286
| 124
| 0.639236
| false
| 3.791439
| false
| false
| false
|
au9ustine/elrond
|
elrond/aws/s3.py
|
1
|
3432
|
import os
import sys
import json
import threading
import boto3
from boto3.s3.transfer import S3Transfer
from elrond.crypto import get_file_digest
DEFAULT_CHUNK_SIZE = 64 * 1024 * 1024
ELROND_S3_SINGLETON_CLIENT = None
ELROND_S3_SUPPORTED_REGIONS = [
'EU',
'eu-west-1',
'us-west-1',
'us-west-2',
'ap-south-1',
'ap-southeast-1',
'ap-southeast-2',
'ap-northeast-1',
'sa-east-1',
'cn-north-1',
'eu-central-1'
]
def analyse(file_path):
res_st = os.stat(file_path, follow_symlinks=True)
return {
'mode': res_st.st_mode,
'atime': res_st.st_atime,
'mtime': res_st.st_mtime,
'ctime': res_st.st_ctime,
'size': res_st.st_size,
'digest': {
'algorithm': 'sha256',
'value': get_file_digest(file_path)
}
}
def update_metadata(file_path, metadata):
os.chmod(file_path, metadata['mode'])
os.utime(file_path, (metadata['atime'], metadata['mtime']))
def metadata2str(metadata):
return json.dumps(metadata,separators=(',', ':'))
def str2metadata(metadata_str):
return json.loads(metadata_str)
def chunk(stream, chunk_size=DEFAULT_CHUNK_SIZE):
for block in iter(lambda: stream.read(chunk_size), b''):
yield block
def get_client():
global ELROND_S3_SINGLETON_CLIENT
if ELROND_S3_SINGLETON_CLIENT is None:
ELROND_S3_SINGLETON_CLIENT = boto3.client('s3')
return ELROND_S3_SINGLETON_CLIENT
def get_buckets():
client = get_client()
return [bucket['Name'] for bucket in client.list_buckets()['Buckets']]
def get_bucket(bucket_name):
client = get_client()
if bucket_name in get_buckets():
location = client.get_bucket_location(
Bucket=bucket_name
)['LocationConstraint']
return (bucket_name, location)
else:
location = os.environ['AWS_DEFAULT_REGION']
assert location in ELROND_S3_SUPPORTED_REGIONS
res = client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': location
}
)
return (bucket_name, location)
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
def upload(bucket_name, file_path, key_name):
file_metadata = analyse(file_path)
multipart_mode = file_metadata['size'] > 100 * 1024 * 1024
client = get_client()
if multipart_mode:
pass
else:
transfer = S3Transfer(client)
transfer.upload_file(
file_path, bucket_name, key_name,
extra_args={
'ACL': 'private',
'Metadata': metadata2str(file_metadata),
'ContentType': 'application/octet-stream'
},
callback=ProgressPercentage(file_path))
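# Usage sketch (illustrative only): the bucket name and paths below are
# hypothetical, and valid AWS credentials plus AWS_DEFAULT_REGION are assumed
# to be configured in the environment.
#
# bucket_name, region = get_bucket('elrond-example-bucket')
# upload(bucket_name, '/tmp/example.bin', 'backups/example.bin')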
|
mit
| 4,340,258,530,075,880,400
| 27.363636
| 74
| 0.586247
| false
| 3.473684
| false
| false
| false
|
cerrno/neurokernel
|
examples/timing/run_gpu_slow.py
|
2
|
1898
|
#!/usr/bin/env python
"""
Run timing test (GPU) scaled over number of ports.
"""
import csv
import glob
import multiprocessing as mp
import os
import re
import subprocess
import sys
import numpy as np
from neurokernel.tools.misc import get_pids_open
try:
from subprocess import DEVNULL
except ImportError:
import os
DEVNULL = open(os.devnull, 'wb')
out_file = sys.argv[1]
script_name = 'timing_demo_gpu_slow.py'
trials = 3
lpus = 2
def check_and_print_output(*args):
for i in xrange(5):
# CUDA < 7.0 doesn't properly clean up IPC-related files; since
# these can cause problems, we manually remove them before launching
# each job:
ipc_files = glob.glob('/dev/shm/cuda.shm*')
for ipc_file in ipc_files:
# Only remove files that are not being held open by any processes:
if not get_pids_open(ipc_file):
try:
os.remove(ipc_file)
except:
pass
try:
out = subprocess.check_output(*args, env=os.environ, stderr=DEVNULL)
except Exception as e:
out = e.output
if 'error' not in out:
break
print out,
return out
pool = mp.Pool(1)
results = []
for spikes in np.linspace(50, 15000, 25, dtype=int):
for i in xrange(trials):
r = pool.apply_async(check_and_print_output,
[['srun', '-n', '1', '-c', str(lpus+2),
'-p', 'huxley',
'--gres=gpu:%s' % lpus,
'python', script_name,
'-u', str(lpus), '-s', str(spikes),
'-g', '0', '-m', '50']])
results.append(r)
f = open(out_file, 'w', 0)
w = csv.writer(f)
for r in results:
w.writerow(r.get().strip('[]\n\"').split(', '))
f.close()
|
bsd-3-clause
| 5,892,850,443,696,471,000
| 26.507246
| 80
| 0.53372
| false
| 3.560976
| false
| false
| false
|
hsoft/pluginbuilder
|
pluginbuilder/util.py
|
1
|
12467
|
import os, sys, zipfile, time
from modulegraph.find_modules import PY_SUFFIXES
from modulegraph.modulegraph import os_listdir
import macholib.util
def os_path_islink(path):
"""
os.path.islink with zipfile support.
Luckily zipfiles cannot contain symlinks, therefore the implementation is
trivial.
"""
return os.path.islink(path)
def os_readlink(path):
"""
os.readlink with zipfile support.
Luckily zipfiles cannot contain symlinks, therefore the implementation is
trivial.
"""
return os.readlink(path)
def os_path_isdir(path):
"""
os.path.isdir that understands zipfiles.
Assumes that you're checking a path that is the result of os_listdir and
might give false positives otherwise.
"""
while path.endswith('/') and path != '/':
path = path[:-1]
zf, zp = path_to_zip(path)
if zf is None:
return os.path.isdir(zp)
else:
zip = zipfile.ZipFile(zf)
try:
info = zip.getinfo(zp)
except KeyError:
return True
else:
# Not quite true, you can store information about directories in
# zipfiles, but those have a slash at the end of the filename
return False
def copy_resource(source, destination, dry_run=0):
"""
Copy a resource file into the application bundle
"""
if os.path.isdir(source):
# XXX: This is wrong, need to call ourselves recursively
if not dry_run:
if not os.path.exists(destination):
os.mkdir(destination)
for fn in os_listdir(source):
copy_resource(os.path.join(source, fn),
os.path.join(destination, fn), dry_run=dry_run)
else:
copy_file_data(source, destination, dry_run=dry_run)
def copy_file_data(source, destination, dry_run=0):
zf, zp = path_to_zip(source)
if zf is None:
data = open(zp,'rb').read()
else:
data = get_zip_data(zf, zp)
if not dry_run:
fp = open(destination, 'wb')
fp.write(data)
fp.close()
def get_zip_data(path_to_zip, path_in_zip):
zf = zipfile.ZipFile(path_to_zip)
return zf.read(path_in_zip)
def path_to_zip(path):
"""
Returns (pathtozip, pathinzip). If path isn't in a zipfile pathtozip
will be None
"""
orig_path = path
from distutils.errors import DistutilsFileError
if os.path.exists(path):
return (None, path)
else:
rest = ''
while not os.path.exists(path):
path, r = os.path.split(path)
if not path:
raise DistutilsFileError("File doesn't exist: %s"%(orig_path,))
rest = os.path.join(r, rest)
if not os.path.isfile(path):
# Directory really doesn't exist
raise DistutilsFileError("File doesn't exist: %s"%(orig_path,))
try:
zf = zipfile.ZipFile(path)
except zipfile.BadZipfile:
raise DistutilsFileError("File doesn't exist: %s"%(orig_path,))
if rest.endswith('/'):
rest = rest[:-1]
return path, rest
def get_mtime(path, mustExist=True):
"""
Get mtime of a path, even if it is inside a zipfile
"""
try:
return os.stat(path).st_mtime
except os.error:
from distutils.errors import DistutilsFileError
try:
path, rest = path_to_zip(path)
except DistutilsFileError:
if not mustExist:
return -1
raise
zf = zipfile.ZipFile(path)
info = zf.getinfo(rest)
return time.mktime(info.date_time + (0, 0, 0))
def newer(source, target):
"""
distutils.dep_utils.newer with zipfile support
"""
msource = get_mtime(source)
mtarget = get_mtime(target, mustExist=False)
return msource > mtarget
def is_python_package(path):
"""Returns whether `path` is a python package (has a __init__.py(c|o) file).
"""
if os_path_isdir(path):
for p in os_listdir(path):
if p.startswith('__init__.') and p[8:] in {'.py', '.pyc', '.pyo'}:
return True
return False
def make_exec(path):
mask = os.umask(0)
os.umask(mask)
os.chmod(path, os.stat(path).st_mode | (0o111 & ~mask))
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def mergecopy(src, dest):
return macholib.util.mergecopy(src, dest)
def mergetree(src, dst, condition=None, copyfn=mergecopy):
"""Recursively merge a directory tree using mergecopy()."""
return macholib.util.mergetree(src, dst, condition=condition, copyfn=copyfn)
def move(src, dst):
return macholib.util.move(src, dst)
LOADER = """
def __load():
import imp, os, sys, os.path
ext = %r
library_path = os.environ['LIBRARYPATH']
dynload_path = os.path.join(library_path, 'lib-dynload')
ext = os.path.join(dynload_path, ext)
if os.path.exists(ext):
mod = imp.load_dynamic(__name__, ext)
else:
raise ImportError(repr(ext) + " not found")
__load()
del __load
"""
def make_loader(fn):
return LOADER % fn
def byte_compile(py_files, optimize=0, force=0,
target_dir=None, verbose=1, dry_run=0,
direct=None):
if direct is None:
direct = (__debug__ and optimize == 0)
# "Indirect" byte-compilation: write a temporary script and then
# run it with the appropriate flags.
if not direct:
from tempfile import mktemp
from distutils.util import execute, spawn
script_name = mktemp(".py")
if verbose:
print("writing byte-compilation script '%s'" % script_name)
if not dry_run:
script = open(script_name, "w")
script.write("""
from pluginbuilder.util import byte_compile
from modulegraph.modulegraph import *
files = [
""")
for f in py_files:
script.write(repr(f) + ",\n")
script.write("]\n")
script.write("""
byte_compile(files, optimize=%r, force=%r,
target_dir=%r,
verbose=%r, dry_run=0,
direct=1)
""" % (optimize, force, target_dir, verbose))
script.close()
cmd = [sys.executable, script_name]
if optimize == 1:
cmd.insert(1, "-O")
elif optimize == 2:
cmd.insert(1, "-OO")
spawn(cmd, verbose=verbose, dry_run=dry_run)
execute(os.remove, (script_name,), "removing %s" % script_name,
verbose=verbose, dry_run=dry_run)
else:
from py_compile import compile
from distutils.dir_util import mkpath
for mod in py_files:
# Terminology from the py_compile module:
# cfile - byte-compiled file
# dfile - purported source filename (same as 'file' by default)
if mod.filename == mod.identifier:
cfile = os.path.basename(mod.filename)
dfile = cfile + (__debug__ and 'c' or 'o')
else:
cfile = mod.identifier.replace('.', os.sep)
if mod.packagepath:
dfile = cfile + os.sep + '__init__.py' + (__debug__ and 'c' or 'o')
else:
dfile = cfile + '.py' + (__debug__ and 'c' or 'o')
if target_dir:
cfile = os.path.join(target_dir, dfile)
if force or newer(mod.filename, cfile):
if verbose:
print("byte-compiling %s to %s" % (mod.filename, dfile))
if not dry_run:
mkpath(os.path.dirname(cfile))
suffix = os.path.splitext(mod.filename)[1]
if suffix in ('.py', '.pyw'):
zfile, pth = path_to_zip(mod.filename)
if zfile is None:
compile(mod.filename, cfile, dfile)
else:
fn = dfile + '.py'
open(fn, 'wb').write(get_zip_data(zfile, pth))
compile(mod.filename, cfile, dfile)
os.unlink(fn)
elif suffix in PY_SUFFIXES:
# Minor problem: This will happily copy a file
# <mod>.pyo to <mod>.pyc or <mod>.pyc to
# <mod>.pyo, but it does seem to work.
copy_file_data(mod.filename, cfile)
else:
raise RuntimeError \
("Don't know how to handle %r" % mod.filename)
else:
if verbose:
print("skipping byte-compilation of %s to %s" % \
(mod.filename, dfile))
SCMDIRS = {'CVS', '.svn', '.hg', '.git'}
def skipscm(ofn):
fn = os.path.basename(ofn)
if fn in SCMDIRS:
return False
return True
def iter_platform_files(path, is_platform_file=macholib.util.is_platform_file):
"""
Iterate over all of the platform files in a directory
"""
for root, dirs, files in os.walk(path):
for fn in files:
fn = os.path.join(root, fn)
if is_platform_file(fn):
yield fn
def copy_tree(src, dst,
preserve_mode=1,
preserve_times=1,
preserve_symlinks=0,
update=0,
verbose=0,
dry_run=0,
condition=None):
"""
Copy an entire directory tree 'src' to a new location 'dst'. Both
'src' and 'dst' must be directory names. If 'src' is not a
directory, raise DistutilsFileError. If 'dst' does not exist, it is
created with 'mkpath()'. The end result of the copy is that every
file in 'src' is copied to 'dst', and directories under 'src' are
recursively copied to 'dst'. Return the list of files that were
copied or might have been copied, using their output name. The
return value is unaffected by 'update' or 'dry_run': it is simply
the list of all files under 'src', with the names changed to be
under 'dst'.
'preserve_mode' and 'preserve_times' are the same as for
'copy_file'; note that they only apply to regular files, not to
directories. If 'preserve_symlinks' is true, symlinks will be
copied as symlinks (on platforms that support them!); otherwise
(the default), the destination of the symlink will be copied.
'update' and 'verbose' are the same as for 'copy_file'.
"""
assert isinstance(src, str), repr(src)
assert isinstance(dst, str), repr(dst)
from distutils.dir_util import mkpath
from distutils.file_util import copy_file
from distutils.dep_util import newer
from distutils.errors import DistutilsFileError
from distutils import log
if condition is None:
condition = skipscm
if not dry_run and not os_path_isdir(src):
raise DistutilsFileError("cannot copy tree '%s': not a directory" % src)
try:
names = os_listdir(src)
except os.error as xxx_todo_changeme:
(errno, errstr) = xxx_todo_changeme.args
if dry_run:
names = []
else:
raise DistutilsFileError("error listing files in '%s': %s" % (src, errstr))
if not dry_run:
mkpath(dst)
outputs = []
for n in names:
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
if (condition is not None) and (not condition(src_name)):
continue
if preserve_symlinks and os_path_islink(src_name):
link_dest = os_readlink(src_name)
log.info("linking %s -> %s", dst_name, link_dest)
if not dry_run:
if update and not newer(src, dst_name):
pass
else:
if os_path_islink(dst_name):
os.remove(dst_name)
os.symlink(link_dest, dst_name)
outputs.append(dst_name)
elif os_path_isdir(src_name):
outputs.extend(
copy_tree(src_name, dst_name, preserve_mode,
preserve_times, preserve_symlinks, update,
dry_run=dry_run, condition=condition))
else:
copy_file(src_name, dst_name, preserve_mode,
preserve_times, update, dry_run=dry_run)
outputs.append(dst_name)
return outputs
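# Illustrative call (not part of the module; the paths are made up): mirror a
# source tree into a bundle directory, keeping symlinks and skipping SCM
# directories via the default skipscm condition described in the docstring.
#
# copied = copy_tree('build/lib', 'dist/MyApp.app/Contents/Resources/lib',
# preserve_symlinks=1, condition=skipscm)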
|
mit
| 2,080,846,094,289,265,200
| 30.722646
| 87
| 0.562365
| false
| 3.843095
| false
| false
| false
|
CliMT/climt-future
|
climt/_lib/rrtmg_lw/setup.py
|
1
|
2948
|
from setuptools import setup, Extension
from Cython.Distutils import build_ext
# This line only needed if building with NumPy in Cython file.
from numpy import get_include
from os import system
import os
# compile the fortran modules without linking
module_list = [
'parkind.f90',
'parrrtm.f90',
'rrlw_cld.f90',
'rrlw_con.f90',
'rrlw_kg01.f90',
'rrlw_kg02.f90',
'rrlw_kg03.f90',
'rrlw_kg04.f90',
'rrlw_kg05.f90',
'rrlw_kg06.f90',
'rrlw_kg07.f90',
'rrlw_kg08.f90',
'rrlw_kg09.f90',
'rrlw_kg10.f90',
'rrlw_kg11.f90',
'rrlw_kg12.f90',
'rrlw_kg13.f90',
'rrlw_kg14.f90',
'rrlw_kg15.f90',
'rrlw_kg16.f90',
'rrlw_ncpar.f90',
'rrlw_ref.f90',
'rrlw_tbl.f90',
'rrlw_vsn.f90',
'rrlw_wvn.f90']
sources_list = [
'rrtmg_lw_cldprop.f90',
'rrtmg_lw_cldprmc.f90',
'rrtmg_lw_rtrn.f90',
'rrtmg_lw_rtrnmr.f90',
'rrtmg_lw_rtrnmc.f90',
'rrtmg_lw_setcoef.f90',
'rrtmg_lw_taumol.f90',
'rrtmg_lw_rad.nomcica.f90',
'mcica_random_numbers.f90',
'rrtmg_lw_init.f90',
'mcica_subcol_gen_lw.f90',
'rrtmg_lw_rad.f90',
'rrtmg_lw_c_binder.f90']
unoptimised_sources_list = [
'rrtmg_lw_k_g.f90',
]
object_file_list = []
fc = os.getenv('FC', 'gfortran ')
fflags = os.getenv('FFLAGS', ' -fPIC -fno-range-check ')
cflags = os.getenv('CFLAGS', '-fPIC')
f_opt_flags = os.getenv('CLIMT_OPTIMIZE_FLAG', '-O3')
f_no_opt_flags = os.getenv('CLIMT_NO_OPTIMIZE_FLAG', ' -O0 ')
ldflags = os.getenv('LDFLAGS', '-lgfortran')
print('Compiling Modules')
for module in module_list:
output_file = module[:-3]+'o'
object_file_list.append(output_file)
compilation_command = fc+module+' -c -o '+output_file+' '+f_opt_flags+fflags
print(compilation_command)
system(compilation_command)
print('Compiling Sources')
for source in sources_list:
output_file = source[:-3]+'o'
object_file_list.append(output_file)
compilation_command = fc+source+' -c -o '+output_file+' '+f_opt_flags+fflags
print(compilation_command)
system(compilation_command)
print('Compiling k coefficient tables')
for source in unoptimised_sources_list:
output_file = source[:-3]+'o'
object_file_list.append(output_file)
compilation_command = fc+source+' -c -o '+output_file+f_no_opt_flags+fflags
print(compilation_command)
system(compilation_command)
link_args_list = object_file_list + [ldflags]
ext_modules = [
Extension( # module name:
'_rrtm_lw',
# source file:
['_rrtm_lw.pyx'],
# other compile args for gcc
extra_compile_args=[cflags, f_opt_flags, ldflags],
# other files to link to
extra_link_args=link_args_list)]
setup(name='_rrtm_lw',
cmdclass={'build_ext': build_ext},
# Needed if building with NumPy.
# This includes the NumPy headers when compiling.
include_dirs=[get_include()],
ext_modules=ext_modules)
|
bsd-3-clause
| -8,972,440,418,118,426,000
| 25.8
| 80
| 0.637042
| false
| 2.615794
| false
| false
| false
|
hachreak/invenio-accounts
|
invenio_accounts/models.py
|
1
|
5487
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Database models for accounts."""
from __future__ import absolute_import, print_function
from datetime import datetime
from flask import current_app, session
from flask_security import RoleMixin, UserMixin
from invenio_db import db
from sqlalchemy.orm import validates
from sqlalchemy_utils import IPAddressType, Timestamp
userrole = db.Table(
'accounts_userrole',
db.Column('user_id', db.Integer(), db.ForeignKey(
'accounts_user.id', name='fk_accounts_userrole_user_id')),
db.Column('role_id', db.Integer(), db.ForeignKey(
'accounts_role.id', name='fk_accounts_userrole_role_id')),
)
"""Relationship between users and roles."""
class Role(db.Model, RoleMixin):
"""Role data model."""
__tablename__ = "accounts_role"
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
"""Role name."""
description = db.Column(db.String(255))
"""Role description."""
def __str__(self):
"""Return the name and description of the role."""
return '{0.name} - {0.description}'.format(self)
class User(db.Model, UserMixin):
"""User data model."""
__tablename__ = "accounts_user"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
"""User email."""
password = db.Column(db.String(255))
"""User password."""
active = db.Column(db.Boolean(name='active'))
"""Flag to say if the user is active or not ."""
confirmed_at = db.Column(db.DateTime)
"""When the user confirmed the email address."""
last_login_at = db.Column(db.DateTime)
"""When the user logged-in for the last time."""
current_login_at = db.Column(db.DateTime)
"""When user logged into the current session."""
last_login_ip = db.Column(IPAddressType, nullable=True)
"""Last user IP address."""
current_login_ip = db.Column(IPAddressType, nullable=True)
"""Current user IP address."""
login_count = db.Column(db.Integer)
"""Count how many times the user logged in."""
roles = db.relationship('Role', secondary=userrole,
backref=db.backref('users', lazy='dynamic'))
"""List of the user's roles."""
@validates('last_login_ip', 'current_login_ip')
def validate_ip(self, key, value):
"""Hack untrackable IP addresses."""
# NOTE Flask-Security stores 'untrackable' value to IPAddressType
# field. This incorrect value causes ValueError on loading
# user object.
if value == 'untrackable': # pragma: no cover
value = None
return value
def __str__(self):
"""Representation."""
return 'User <id={0.id}, email={0.email}>'.format(self)
class SessionActivity(db.Model, Timestamp):
"""User Session Activity model.
Instances of this model correspond to a session belonging to a user.
"""
__tablename__ = "accounts_user_session_activity"
sid_s = db.Column(db.String(255), primary_key=True)
"""Serialized Session ID. Used as the session's key in the kv-session
store employed by `flask-kvsession`.
Named here as it is in `flask-kvsession` to avoid confusion.
"""
user_id = db.Column(db.Integer, db.ForeignKey(
User.id, name='fk_accounts_session_activity_user_id'))
"""ID of user to whom this session belongs."""
user = db.relationship(User, backref='active_sessions')
ip = db.Column(db.String(80), nullable=True)
"""IP address."""
country = db.Column(db.String(3), nullable=True)
"""Country name."""
browser = db.Column(db.String(80), nullable=True)
"""User browser."""
browser_version = db.Column(db.String(30), nullable=True)
"""Browser version."""
os = db.Column(db.String(80), nullable=True)
"""User operative system name."""
device = db.Column(db.String(80), nullable=True)
"""User device."""
@classmethod
def query_by_expired(cls):
"""Query to select all expired sessions."""
lifetime = current_app.permanent_session_lifetime
expired_moment = datetime.utcnow() - lifetime
return cls.query.filter(cls.created < expired_moment)
@classmethod
def query_by_user(cls, user_id):
"""Query to select user sessions."""
return cls.query.filter_by(user_id=user_id)
@classmethod
def is_current(cls, sid_s):
"""Check if the session is the current one."""
return session.sid_s == sid_s
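# Illustrative cleanup sketch (not part of this module; assumes an application
# context and uses the invenio_db session imported above as `db`):
#
# for activity in SessionActivity.query_by_expired():
# db.session.delete(activity)
# db.session.commit()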
|
gpl-2.0
| 6,014,488,001,262,512,000
| 31.087719
| 76
| 0.659194
| false
| 3.839748
| false
| false
| false
|
khertan/KhtNotes
|
khtnotes/merge3/merge3.py
|
1
|
18192
|
# Copyright (C) 2005-2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#from __future__ import absolute_import
# mbp: "you know that thing where cvs gives you conflict markers?"
# s: "i hate that."
import errors
import patiencediff
import textfile
def intersect(ra, rb):
"""Given two ranges return the range where they intersect or None.
>>> intersect((0, 10), (0, 6))
(0, 6)
>>> intersect((0, 10), (5, 15))
(5, 10)
>>> intersect((0, 10), (10, 15))
>>> intersect((0, 9), (10, 15))
>>> intersect((0, 9), (7, 15))
(7, 9)
"""
# preconditions: (ra[0] <= ra[1]) and (rb[0] <= rb[1])
sa = max(ra[0], rb[0])
sb = min(ra[1], rb[1])
if sa < sb:
return sa, sb
else:
return None
def compare_range(a, astart, aend, b, bstart, bend):
"""Compare a[astart:aend] == b[bstart:bend], without slicing.
"""
if (aend - astart) != (bend - bstart):
return False
for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
if a[ia] != b[ib]:
return False
else:
return True
class Merge3(object):
"""3-way merge of texts.
Given BASE, OTHER, THIS, tries to produce a combined text
incorporating the changes from both BASE->OTHER and BASE->THIS.
All three will typically be sequences of lines."""
def __init__(self, base, a, b, is_cherrypick=False, allow_objects=False):
"""Constructor.
:param base: lines in BASE
:param a: lines in A
:param b: lines in B
:param is_cherrypick: flag indicating if this merge is a cherrypick.
When cherrypicking b => a, matches with b and base do not conflict.
:param allow_objects: if True, do not require that base, a and b are
plain Python strs. Also prevents BinaryFile from being raised.
Lines can be any sequence of comparable and hashable Python
objects.
"""
if not allow_objects:
textfile.check_text_lines(base)
textfile.check_text_lines(a)
textfile.check_text_lines(b)
self.base = base
self.a = a
self.b = b
self.is_cherrypick = is_cherrypick
def merge_lines(self,
name_a=None,
name_b=None,
name_base=None,
start_marker='<<<<<<<',
mid_marker='=======',
end_marker='>>>>>>>',
base_marker=None,
reprocess=False):
"""Return merge in cvs-like form.
"""
newline = '\n'
if len(self.a) > 0:
if self.a[0].endswith('\r\n'):
newline = '\r\n'
elif self.a[0].endswith('\r'):
newline = '\r'
if base_marker and reprocess:
raise errors.CantReprocessAndShowBase()
if name_a:
start_marker = start_marker + ' ' + name_a
if name_b:
end_marker = end_marker + ' ' + name_b
if name_base and base_marker:
base_marker = base_marker + ' ' + name_base
merge_regions = self.merge_regions()
if reprocess is True:
merge_regions = self.reprocess_merge_regions(merge_regions)
for t in merge_regions:
what = t[0]
if what == 'unchanged':
for i in range(t[1], t[2]):
yield self.base[i]
elif what == 'a' or what == 'same':
for i in range(t[1], t[2]):
yield self.a[i]
elif what == 'b':
for i in range(t[1], t[2]):
yield self.b[i]
elif what == 'conflict':
yield start_marker + newline
for i in range(t[3], t[4]):
yield self.a[i]
if base_marker is not None:
yield base_marker + newline
for i in range(t[1], t[2]):
yield self.base[i]
yield mid_marker + newline
for i in range(t[5], t[6]):
yield self.b[i]
yield end_marker + newline
else:
raise ValueError(what)
def merge(self):
"""Return merge"""
for t in self.merge_regions():
what = t[0]
if what == 'unchanged':
for i in range(t[1], t[2]):
yield self.base[i]
elif what == 'a' or what == 'same':
for i in range(t[1], t[2]):
yield self.a[i]
elif what == 'b':
for i in range(t[1], t[2]):
yield self.b[i]
elif what == 'conflict':
for i in range(t[3], t[4]):
yield self.a[i]
for i in range(t[5], t[6]):
yield self.b[i]
else:
raise ValueError(what)
def merge_annotated(self):
"""Return merge with conflicts, showing origin of lines.
Most useful for debugging merge.
"""
for t in self.merge_regions():
what = t[0]
if what == 'unchanged':
for i in range(t[1], t[2]):
yield 'u | ' + self.base[i]
elif what == 'a' or what == 'same':
for i in range(t[1], t[2]):
yield what[0] + ' | ' + self.a[i]
elif what == 'b':
for i in range(t[1], t[2]):
yield 'b | ' + self.b[i]
elif what == 'conflict':
yield '<<<<\n'
for i in range(t[3], t[4]):
yield 'A | ' + self.a[i]
yield '----\n'
for i in range(t[5], t[6]):
yield 'B | ' + self.b[i]
yield '>>>>\n'
else:
raise ValueError(what)
def merge_groups(self):
"""Yield sequence of line groups. Each one is a tuple:
'unchanged', lines
Lines unchanged from base
'a', lines
Lines taken from a
'same', lines
Lines taken from a (and equal to b)
'b', lines
Lines taken from b
'conflict', base_lines, a_lines, b_lines
Lines from base were changed to either a or b and conflict.
"""
for t in self.merge_regions():
what = t[0]
if what == 'unchanged':
yield what, self.base[t[1]:t[2]]
elif what == 'a' or what == 'same':
yield what, self.a[t[1]:t[2]]
elif what == 'b':
yield what, self.b[t[1]:t[2]]
elif what == 'conflict':
yield (what,
self.base[t[1]:t[2]],
self.a[t[3]:t[4]],
self.b[t[5]:t[6]])
else:
raise ValueError(what)
def merge_regions(self):
"""Return sequences of matching and conflicting regions.
This returns tuples, where the first value says what kind we
have:
'unchanged', start, end
Take a region of base[start:end]
'same', astart, aend
b and a are different from base but give the same result
'a', start, end
Non-clashing insertion from a[start:end]
Method is as follows:
The two sequences align only on regions which match the base
and both descendants. These are found by doing a two-way diff
of each one against the base, and then finding the
intersections between those regions. These "sync regions"
are by definition unchanged in both and easily dealt with.
The regions in between can be in any of three cases:
conflicted, or changed on only one side.
"""
# section a[0:ia] has been disposed of, etc
iz = ia = ib = 0
for zmatch, zend, amatch, aend, \
bmatch, bend in self.find_sync_regions():
matchlen = zend - zmatch
# invariants:
# matchlen >= 0
# matchlen == (aend - amatch)
# matchlen == (bend - bmatch)
len_a = amatch - ia
len_b = bmatch - ib
#len_base = zmatch - iz
# invariants:
# assert len_a >= 0
# assert len_b >= 0
# assert len_base >= 0
#print 'unmatched a=%d, b=%d' % (len_a, len_b)
if len_a or len_b:
# try to avoid actually slicing the lists
same = compare_range(self.a, ia, amatch,
self.b, ib, bmatch)
if same:
yield 'same', ia, amatch
else:
equal_a = compare_range(self.a, ia, amatch,
self.base, iz, zmatch)
equal_b = compare_range(self.b, ib, bmatch,
self.base, iz, zmatch)
if equal_a and not equal_b:
yield 'b', ib, bmatch
elif equal_b and not equal_a:
yield 'a', ia, amatch
elif not equal_a and not equal_b:
if self.is_cherrypick:
for node in self._refine_cherrypick_conflict(
iz, zmatch, ia, amatch,
ib, bmatch):
yield node
else:
yield 'conflict', \
iz, zmatch, ia, amatch, ib, bmatch
else:
raise AssertionError(
"can't handle a=b=base but unmatched")
ia = amatch
ib = bmatch
iz = zmatch
# if the same part of the base was deleted on both sides
# that's OK, we can just skip it.
if matchlen > 0:
# invariants:
# assert ia == amatch
# assert ib == bmatch
# assert iz == zmatch
yield 'unchanged', zmatch, zend
iz = zend
ia = aend
ib = bend
def _refine_cherrypick_conflict(self, zstart,
zend, astart, aend, bstart, bend):
"""When cherrypicking b => a, ignore matches with b and base."""
# Do not emit regions which match, only regions which do not match
matches = patiencediff.PatienceSequenceMatcher(None,
self.base[zstart:zend], self.b[bstart:bend]).get_matching_blocks()
last_base_idx = 0
last_b_idx = 0
yielded_a = False
for base_idx, b_idx, match_len in matches:
#conflict_z_len = base_idx - last_base_idx
conflict_b_len = b_idx - last_b_idx
if conflict_b_len == 0: # There are no lines in b which conflict,
# so skip it
pass
else:
if yielded_a:
yield ('conflict',
zstart + last_base_idx, zstart + base_idx,
aend, aend, bstart + last_b_idx, bstart + b_idx)
else:
# The first conflict gets the a-range
yielded_a = True
yield ('conflict', zstart + last_base_idx, zstart +
base_idx,
astart, aend, bstart + last_b_idx, bstart + b_idx)
last_base_idx = base_idx + match_len
last_b_idx = b_idx + match_len
if last_base_idx != zend - zstart or last_b_idx != bend - bstart:
if yielded_a:
yield ('conflict', zstart + last_base_idx, zstart + base_idx,
aend, aend, bstart + last_b_idx, bstart + b_idx)
else:
# The first conflict gets the a-range
yielded_a = True
yield ('conflict', zstart + last_base_idx, zstart + base_idx,
astart, aend, bstart + last_b_idx, bstart + b_idx)
if not yielded_a:
yield ('conflict', zstart, zend, astart, aend, bstart, bend)
def reprocess_merge_regions(self, merge_regions):
"""Where there are conflict regions, remove the agreed lines.
Lines where both A and B have made the same changes are
eliminated.
"""
for region in merge_regions:
if region[0] != "conflict":
yield region
continue
type, iz, zmatch, ia, amatch, ib, bmatch = region
a_region = self.a[ia:amatch]
b_region = self.b[ib:bmatch]
matches = patiencediff.PatienceSequenceMatcher(
None, a_region, b_region).get_matching_blocks()
next_a = ia
next_b = ib
for region_ia, region_ib, region_len in matches[:-1]:
region_ia += ia
region_ib += ib
reg = self.mismatch_region(next_a, region_ia, next_b,
region_ib)
if reg is not None:
yield reg
yield 'same', region_ia, region_len + region_ia
next_a = region_ia + region_len
next_b = region_ib + region_len
reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
if reg is not None:
yield reg
@staticmethod
def mismatch_region(next_a, region_ia, next_b, region_ib):
if next_a < region_ia or next_b < region_ib:
return 'conflict', None, None, next_a, region_ia, next_b, region_ib
def find_sync_regions(self):
"""Return a list of sync regions,where both descendents match the base.
Generates a list of (base1, base2, a1, a2, b1, b2). There is
always a zero-length sync region at the end of all the files.
"""
ia = ib = 0
amatches = patiencediff.PatienceSequenceMatcher(
None, self.base, self.a).get_matching_blocks()
bmatches = patiencediff.PatienceSequenceMatcher(
None, self.base, self.b).get_matching_blocks()
len_a = len(amatches)
len_b = len(bmatches)
sl = []
while ia < len_a and ib < len_b:
abase, amatch, alen = amatches[ia]
bbase, bmatch, blen = bmatches[ib]
# there is an unconflicted block at i; how long does it
# extend? until whichever one ends earlier.
i = intersect((abase, abase + alen), (bbase, bbase + blen))
if i:
intbase = i[0]
intend = i[1]
intlen = intend - intbase
# found a match of base[i[0], i[1]]; this may be less than
# the region that matches in either one
# assert intlen <= alen
# assert intlen <= blen
# assert abase <= intbase
# assert bbase <= intbase
asub = amatch + (intbase - abase)
bsub = bmatch + (intbase - bbase)
aend = asub + intlen
bend = bsub + intlen
# assert self.base[intbase:intend] == self.a[asub:aend], \
# (self.base[intbase:intend], self.a[asub:aend])
# assert self.base[intbase:intend] == self.b[bsub:bend]
sl.append((intbase, intend,
asub, aend,
bsub, bend))
# advance whichever one ends first in the base text
if (abase + alen) < (bbase + blen):
ia += 1
else:
ib += 1
intbase = len(self.base)
abase = len(self.a)
bbase = len(self.b)
sl.append((intbase, intbase, abase, abase, bbase, bbase))
return sl
def find_unconflicted(self):
"""Return a list of ranges in base that are not conflicted."""
am = patiencediff.PatienceSequenceMatcher(
None, self.base, self.a).get_matching_blocks()
bm = patiencediff.PatienceSequenceMatcher(
None, self.base, self.b).get_matching_blocks()
unc = []
while am and bm:
# there is an unconflicted block at i; how long does it
# extend? until whichever one ends earlier.
a1 = am[0][0]
a2 = a1 + am[0][2]
b1 = bm[0][0]
b2 = b1 + bm[0][2]
i = intersect((a1, a2), (b1, b2))
if i:
unc.append(i)
if a2 < b2:
del am[0]
else:
del bm[0]
return unc
def main(argv):
# as for diff3 and meld the syntax is "MINE BASE OTHER"
a = file(argv[1], 'rt').readlines()
base = file(argv[2], 'rt').readlines()
b = file(argv[3], 'rt').readlines()
m3 = Merge3(base, a, b)
#for sr in m3.find_sync_regions():
# print sr
# sys.stdout.writelines(m3.merge_lines(name_a=argv[1], name_b=argv[3]))
sys.stdout.writelines(m3.merge())
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
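# Illustrative example (not part of the module; the inputs are made up):
# merge_lines() yields the merged text with cvs-style conflict markers where
# the BASE->A and BASE->B changes clash.
#
# m3 = Merge3(['line\n'], ['line changed in A\n'], ['line changed in B\n'])
# print ''.join(m3.merge_lines(name_a='A', name_b='B'))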
|
gpl-3.0
| -2,084,280,794,020,240,600
| 34.952569
| 79
| 0.484883
| false
| 3.930856
| false
| false
| false
|
tschalch/pyTray
|
src/setup.py
|
1
|
6933
|
#!/usr/bin/env python
#this installer script uses InnoSetup to generate a complete Installer
from distutils.core import setup
import py2exe
import os, os.path, sys
import glob
#adding lib directory to module search path
libpath = os.path.abspath(os.path.dirname(sys.argv[0])) + "/lib"
sys.path.append(os.path.abspath(libpath))
includes = ["encodings", "encodings.latin_1",]
#options = {"py2exe": {"compressed": 1,
# "optimize": 2,
# "ascii": 1,
# "bundle_files": 1,
# "includes":includes}},
################################################################
# A program using wxPython
# The manifest will be inserted as resource into test_wx.exe. This
# gives the controls the Windows XP appearance (if run on XP ;-)
#
# Another option would be to store if in a file named
# test_wx.exe.manifest, and probably copy it with the data_files
# option.
#
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
################################################################
# arguments for the setup() call
pyTray = dict(
script = "pytray.py",
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="pyTray"))],
dest_base = r"pyTray",
icon_resources = [(1,"files/images/icon.ico")])
zipfile = r"lib\shardlib"
options = {"py2exe": {"compressed": 1,
"optimize": 2}}
################################################################
import os
class InnoScript:
def __init__(self,
name,
lib_dir,
dist_dir,
windows_exe_files = [],
lib_files = [],
version = "1.0"):
self.lib_dir = lib_dir
self.dist_dir = dist_dir
if not self.dist_dir[-1] in "\\/":
self.dist_dir += "\\"
self.name = name
self.version = version
self.windows_exe_files = [self.chop(p) for p in windows_exe_files]
self.lib_files = [self.chop(p) for p in lib_files]
def chop(self, pathname):
assert pathname.startswith(self.dist_dir)
return pathname[len(self.dist_dir):]
def create(self, pathname="dist\\pytray.iss"):
self.pathname = pathname
ofi = self.file = open(pathname, "w")
print >> ofi, "; WARNING: This script has been created by py2exe. Changes to this script"
print >> ofi, "; will be overwritten the next time py2exe is run!"
print >> ofi, r"[Setup]"
print >> ofi, r"AppName=%s" % self.name
print >> ofi, r"AppVerName=%s %s" % (self.name, self.version)
print >> ofi, r"DefaultDirName={pf}\%s" % self.name
print >> ofi, r"DefaultGroupName=%s" % self.name
print >> ofi
print >> ofi, r"[Files]"
for path in self.windows_exe_files + self.lib_files:
print >> ofi, r'Source: "%s"; DestDir: "{app}\%s"; Flags: ignoreversion' % (path, os.path.dirname(path))
print >> ofi
print >> ofi, r"[Icons]"
for path in self.windows_exe_files:
print >> ofi, r'Name: "{group}\%s"; Filename: "{app}\%s"' % \
(self.name, path)
print >> ofi, 'Name: "{group}\Uninstall %s"; Filename: "{uninstallexe}"' % self.name
def compile(self):
try:
import ctypes
except ImportError:
try:
import win32api
except ImportError:
import os
os.startfile(self.pathname)
else:
print "Ok, using win32api."
win32api.ShellExecute(0, "compile",
self.pathname,
None,
None,
0)
else:
print "Cool, you have ctypes installed."
res = ctypes.windll.shell32.ShellExecuteA(0, "compile",
self.pathname,
None,
None,
0)
if res < 32:
raise RuntimeError, "ShellExecute failed, error %d" % res
################################################################
from py2exe.build_exe import py2exe
class build_installer(py2exe):
# This class first builds the exe file(s), then creates a Windows installer.
# You need InnoSetup for it.
def run(self):
# First, let py2exe do its work.
py2exe.run(self)
lib_dir = self.lib_dir
dist_dir = self.dist_dir
# create the Installer, using the files py2exe has created.
script = InnoScript("pytray",
lib_dir,
dist_dir,
self.windows_exe_files,
self.lib_files)
print "*** creating the inno setup script***"
script.create()
print "*** compiling the inno setup script***"
script.compile()
# Note: By default the final setup.exe will be in an Output subdirectory.
################################################################
setup(
description='Crystallization Management Software',
options = options,
# The lib directory contains everything except the executables and the python dll.
zipfile = zipfile,
windows = [pyTray],
# use out build_installer class as extended py2exe build command
cmdclass = {"py2exe": build_installer},
data_files=[(r"files", glob.glob(r"files/*.*")),
(r"files/test", glob.glob(r"files/test/*.*")),
(r"files/Dtd", glob.glob(r"files/Dtd/*.*")),
(r"files/fonts", glob.glob(r"files/fonts/*.*")),
(r"files/images", glob.glob(r"files/images/*.*")),
],
author='Thomas Schalch',
author_email='schalch@cshl.edu',
packages = ["gui","dataStructures","util","test"],
)
|
bsd-3-clause
| 2,843,598,152,450,568,700
| 34.298429
| 116
| 0.492572
| false
| 4.078235
| false
| false
| false
|
tdegeus/GooseEYE
|
docs/examples/clusters_dilate_periodic.py
|
1
|
2926
|
r'''
Plot and/or check.
Usage:
script [options]
Options:
-s, --save Save output for later check.
-c, --check Check against earlier results.
-p, --plot Plot.
-h, --help Show this help.
'''
# <snippet>
import numpy as np
import GooseEYE
# generate image
I = np.zeros((21, 21), dtype='bool')
I[4, 4] = True
I[18, 19] = True
I[19, 19] = True
I[20, 19] = True
I[19, 18] = True
I[19, 20] = True
# clusters
C = GooseEYE.Clusters(I).labels()
# dilate
CD = GooseEYE.dilate(C)
# </snippet>
if __name__ == '__main__':
import docopt
args = docopt.docopt(__doc__)
if args['--save']:
import h5py
with h5py.File('clusters_dilate_periodic.h5', 'w') as data:
data['I'] = I
data['C'] = C
data['CD'] = CD
if args['--check']:
import h5py
with h5py.File('clusters_dilate_periodic.h5', 'r') as data:
assert np.all(np.equal(data['I'][...], I))
assert np.all(np.equal(data['C'][...], C))
assert np.all(np.equal(data['CD'][...], CD))
if args['--plot']:
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
# color-scheme: modify such that the background is white
# N.B. for a transparent background -> 4th column == 1.
cmap = cm.jet(range(256))
cmap[0, :3] = 1.0
cmap = mpl.colors.ListedColormap(cmap)
try:
plt.style.use(['goose', 'goose-latex'])
except:
pass
fig, axes = plt.subplots(figsize=(18, 6), nrows=1, ncols=3)
ax = axes[0]
im = ax.imshow(I, clim=(0, 1), cmap=mpl.colors.ListedColormap(cm.gray([0, 255])))
ax.xaxis.set_ticks([0, 20])
ax.yaxis.set_ticks([0, 20])
ax.set_xlim([-0.5, 20.5])
ax.set_ylim([-0.5, 20.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'image')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks([0, 1])
ax = axes[1]
im = ax.imshow(CD, clim=(0, np.max(C) + 1), cmap=cmap)
ax.xaxis.set_ticks([0, 20])
ax.yaxis.set_ticks([0, 20])
ax.set_xlim([-0.5, 20.5])
ax.set_ylim([-0.5, 20.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'clusters + dilate')
ax = axes[2]
im = ax.imshow(np.tile(CD, (3, 3)), clim=(0, np.max(C) + 1), cmap=cmap)
ax.xaxis.set_ticks([0, 60])
ax.yaxis.set_ticks([0, 60])
ax.set_xlim([-0.5, 60.5])
ax.set_ylim([-0.5, 60.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'periodic copy')
plt.savefig('clusters_dilate_periodic.svg')
|
gpl-3.0
| -889,297,396,052,922,000
| 25.125
| 89
| 0.520164
| false
| 2.89703
| false
| false
| false
|
drupdates/Slack
|
__init__.py
|
1
|
1072
|
""" Send report using Slack. """
from drupdates.settings import Settings
from drupdates.utils import Utils
from drupdates.constructors.reports import Report
import json, os
class Slack(Report):
""" Slack report plugin. """
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
settings_file = current_dir + '/settings/default.yaml'
self.settings = Settings()
self.settings.add(settings_file)
def send_message(self, report_text):
""" Post the report to a Slack channel or DM a specific user."""
url = self.settings.get('slackURL')
user = self.settings.get('slackUser')
payload = {}
payload['text'] = report_text
payload['new-bot-name'] = user
direct = self.settings.get('slackRecipient')
channel = self.settings.get('slackChannel')
if direct:
payload['channel'] = '@' + direct
elif channel:
payload['channel'] = '#' + channel
Utils.api_call(url, 'Slack', 'post', data=json.dumps(payload))
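# Illustrative settings (hypothetical values) that send_message() reads through
# the drupdates Settings object, e.g. in settings/default.yaml:
#
# slackURL: https://hooks.slack.com/services/T000/B000/XXXXXXXX
# slackUser: drupdates-bot
# slackChannel: general
# slackRecipient: ''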
|
mit
| 81,357,146,827,252,450
| 33.580645
| 72
| 0.616604
| false
| 4
| false
| false
| false
|
RudolfCardinal/pythonlib
|
cardinal_pythonlib/wsgi/headers_mw.py
|
1
|
4487
|
#!/usr/bin/env python
# cardinal_pythonlib/headers_mw.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**WSGI middleware to add arbitrary HTTP headers.**
"""
import logging
from cardinal_pythonlib.wsgi.constants import (
TYPE_WSGI_APP,
TYPE_WSGI_APP_RESULT,
TYPE_WSGI_ENVIRON,
TYPE_WSGI_EXC_INFO,
TYPE_WSGI_RESPONSE_HEADERS,
TYPE_WSGI_START_RESPONSE,
TYPE_WSGI_START_RESP_RESULT,
TYPE_WSGI_STATUS,
)
log = logging.getLogger(__name__)
class HeaderModifyMode(object):
"""
Options for
:class:`cardinal_pythonlib.wsgi.headers_mw.AddHeadersMiddleware`.
"""
ADD = 0
ADD_IF_ABSENT = 1
class AddHeadersMiddleware(object):
"""
WSGI middleware to add arbitrary HTTP headers.
See e.g. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers for a
list of possible HTTP headers.
Note:
- HTTP headers are case-insensitive. However, the canonical form is
hyphenated camel case;
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers.
- You can specify the same HTTP header multiple times; apart from
Set-Cookie, this should have the effect of the browser treating them as
concatenated in a CSV format.
https://stackoverflow.com/questions/3096888;
https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
"""
def __init__(self,
app: TYPE_WSGI_APP,
headers: TYPE_WSGI_RESPONSE_HEADERS,
method: int = HeaderModifyMode.ADD) -> None:
"""
Args:
app:
The WSGI app to which to apply the middleware.
headers:
A list of tuples, each of the form ``(key, value)``.
"""
assert isinstance(headers, list)
for key_value_tuple in headers:
assert isinstance(key_value_tuple, tuple)
assert len(key_value_tuple) == 2
assert isinstance(key_value_tuple[0], str)
assert isinstance(key_value_tuple[1], str)
assert method in [
HeaderModifyMode.ADD,
HeaderModifyMode.ADD_IF_ABSENT,
]
self.app = app
self.headers = headers
self.method = method
def __call__(self,
environ: TYPE_WSGI_ENVIRON,
start_response: TYPE_WSGI_START_RESPONSE) \
-> TYPE_WSGI_APP_RESULT:
"""
Called every time the WSGI app is used.
"""
def add(status: TYPE_WSGI_STATUS,
headers: TYPE_WSGI_RESPONSE_HEADERS,
exc_info: TYPE_WSGI_EXC_INFO = None) \
-> TYPE_WSGI_START_RESP_RESULT:
# Add headers. If they were present already, there will be
# several versions now. See above.
return start_response(status, headers + self.headers, exc_info)
def add_if_absent(status: TYPE_WSGI_STATUS,
headers: TYPE_WSGI_RESPONSE_HEADERS,
exc_info: TYPE_WSGI_EXC_INFO = None) \
-> TYPE_WSGI_START_RESP_RESULT:
# Add headers, but not if that header was already present.
# Note case-insensitivity.
header_keys_lower = [kv[0].lower() for kv in headers]
new_headers = [x for x in self.headers
if x[0].lower() not in header_keys_lower]
return start_response(status, headers + new_headers, exc_info)
method = self.method
if method == HeaderModifyMode.ADD:
custom_start_response = add
else:
custom_start_response = add_if_absent
return self.app(environ, custom_start_response)
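# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of wrapping a WSGI app; AddHeadersMiddleware and
# HeaderModifyMode are the classes defined above, while `demo_app` is a
# hypothetical application written only for illustration.
#
# def demo_app(environ, start_response):
#     start_response("200 OK", [("Content-Type", "text/plain")])
#     return [b"hello"]
#
# wrapped = AddHeadersMiddleware(
#     demo_app,
#     headers=[("X-Frame-Options", "DENY")],
#     method=HeaderModifyMode.ADD_IF_ABSENT,
# )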
|
apache-2.0
| 8,220,256,760,036,851,000
| 32.992424
| 79
| 0.592378
| false
| 4.197381
| false
| false
| false
|
aep124/TwitterAnalyticsTools
|
textonly.py
|
1
|
2405
|
# this is a script to retrieve and process text-only data for classification
# This process includes five main tasks
# 1) getting raw tweets
# 2) applying labels (this step can be conducted at any time)
# 3) filtering those tweets (e.g., according to CMU POS tagger)
# 4) deriving a set of features (a.k.a. word list)
# 5) writing the feature vectors to an arff file
import tools4pgs
import tools4parsing
import tools4fv
import tools4labeling
import pickle
import copy
import numpy as np
import pandas as pd
# dividing into two dataframe because tweet info is fixed, but features are flexible
# tweet info data frame columns:
# NAME DATATYPE
# twtid ....... string (of digits)
# raw ......... string
# filtered .... string
# userid ...... string (of digits)
# handle ...... string
# label ....... string
# imgurl ...... string
# tweet features data frame columns
# twtid ....... string (of digits)
# feature 1 ... TF score for word 1
# feature 2 ... TF score for word 2
# :
# feature n ... TF score for word n
# label ....... string
############### (1) Get Tweets ################
# TODO: modify query handling to accomodate the column names that databases use, as well as subsets query variables
# (this is written for robbery database)
query = 'SELECT id,text,user_id FROM tweets'
condition = "WHERE text like '%bears%'"
tools4pgs.writetwtinfo(query, condition, 'twtinfo.p')
############### (2) Apply Labels ###############
labelmap = tools4labeling.getlabelmap('labelsystem')
tools4labeling.writelabels('twtinfo.p', labelmap)
################# (3) Filter ################
keepset = tools4parsing.getkeepset('POS2keep')
tools4parsing.writefiltered('twtinfo.p', keepset)
# TODO: add functionality for reply tweets (conversations) ????????
############## (4) Derive Features ##############
wordmap = tools4fv.getwordmap('twtinfo.p')
wordlist = wordmap.keys()
# specify threshold directly :
# freq_threshold = 2
# could also specify threshold by number of words (e.g., 500):
# freq_threshold = sorted(wordmap.values())[-500]
# wordlist = [w for w in wordmap.keys() if wordmap[w] >= freq_threshold]
tools4fv.writetf('twtinfo.p','twtfeatures.p', wordlist)
tools4fv.synclabels('twtinfo.p','twtfeatures.p')
############### (5) Make ARFF File ###############
#tools4fv.writearff('twtfeatures.p')
|
mit
| 2,456,822,204,111,772,700
| 24.585106
| 116
| 0.642827
| false
| 3.358939
| false
| false
| false
|
daicang/Leetcode-solutions
|
146-lru-cache.py
|
1
|
1792
|
class DLNode(object):
def __init__(self):
self.key = None
self.value = None
self.prev = None
self.next = None
class LRUCache(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
self.head = DLNode()
self.tail = DLNode()
self.capacity = capacity
self.size = 0
self.cache = {}
self.head.next = self.tail
self.tail.prev = self.head
def _move_to_head(self, node):
node.prev = self.head
node.next = self.head.next
node.prev.next = node
node.next.prev = node
def _unlink_node(self, node):
node.prev.next = node.next
node.next.prev = node.prev
def get(self, key):
"""
:type key: int
:rtype: int
"""
node = self.cache.get(key)
if node is None:
return -1
self._unlink_node(node)
self._move_to_head(node)
return node.value
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: None
"""
node = self.cache.get(key)
if node:
node.value = value
self._unlink_node(node)
self._move_to_head(node)
return
node = DLNode()
node.key = key
node.value = value
self.cache[key] = node
self._move_to_head(node)
self.size += 1
if self.size > self.capacity:
outdated = self.tail.prev
self._unlink_node(outdated)
del self.cache[outdated.key]
self.size -= 1
c = LRUCache(2)
c.put(1, 1)
c.put(2, 2)
print c.get(1)
c.put(3, 3)
print c.get(2)
c.put(4, 4)
print c.get(1)
print c.get(3)
print c.get(4)
|
mit
| 3,490,341,262,473,513,000
| 19.363636
| 40
| 0.497768
| false
| 3.419847
| false
| false
| false
|
brettchien/LeetCode
|
9_PalindromeNumber.py
|
1
|
3651
|
class Solution:
# @param {integer} x
# @return {boolean}
def isPalindrome(self, x):
if x < 0:
return False
if x > 0 and x % 10 == 0:
return False
reverse = 0
while x > reverse:
reverse = reverse * 10 + x % 10
x /= 10
return (x == reverse) or (x == reverse / 10)
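    # Worked illustration (not in the original): for x = 1221 the loop above
    # stops with x = 12 and reverse = 12, so x == reverse and the number is a
    # palindrome; for x = 12321 it stops with x = 12 and reverse = 123, and the
    # odd-length case is caught by x == reverse / 10 (integer division).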
def cisPalindrome(self, x):
if x < 0:
return False
if x < 10:
return True
if x < 100:
hi = x / 10
lo = x % 10
return hi == lo
pivot = 1
count = 0
while pivot <= x:
count += 1
pivot *= 10
digits = count / 2
first = x / (10 ** (digits + (count % 2)))
second = x % (10 ** digits)
print x, first, second
while digits >= 1:
print first, second
if digits == 1:
return first == second
lo = second % 10
hi = first / (10 ** (digits-1))
print hi, lo
if hi != lo:
return False
else:
first = first % (10 ** (digits-1))
second = second / 10
digits -= 1
def bisPalindrome(self, x):
if x < 0:
return False
if x < 10:
return True
if x < 100:
hi = x / 10
lo = x % 10
return hi == lo
pivot = 1
count = 1
while pivot <= x:
count += 1
pivot *= 10
count -= 1
odd = (count % 2 == 1)
print x, pivot, count
while x > 0:
print x
digit = x % 10
pivot /= 100
x /= 10
hiDigit = x / pivot
print pivot, x, digit, hiDigit
if hiDigit != digit:
return False
x -= digit * pivot
if x == 0:
return True
print x
if odd:
if pivot == 10:
return True
else:
if pivot == 100:
hi = x / 10
lo = x % 10
return hi == lo
def aisPalindrome(self, x):
if x < 0:
return False
if x < 10:
return True
if x == 10:
return False
pivot = 1
count = 1
while pivot <= x:
count += 1
pivot *= 10
count -= 1
print x, pivot, count
while x > 0:
print x
digit = x % 10
pivot /= 100
x /= 10
if digit == 0 and pivot > x:
continue
if count % 2 == 0: #even numbers of digits
if pivot == 10:
return x == digit
else: # odd numbers of digits
if pivot == 1:
return True
check = x - digit * pivot
print pivot, x, digit, check
if check == 0:
return True
elif check < 0 or check >= digit * pivot:
return False
else:
x -= digit * pivot
if __name__ == "__main__":
sol = Solution()
print sol.isPalindrome(121) == True
print sol.isPalindrome(101) == True
print sol.isPalindrome(100) == False
print sol.isPalindrome(9999) == True
print sol.isPalindrome(99999) == True
print sol.isPalindrome(999999) == True
print sol.isPalindrome(1000110001) == True
print sol.isPalindrome(1000021) == False
|
mit
| 3,103,546,097,456,922,600
| 26.870229
| 54
| 0.398247
| false
| 4.474265
| false
| false
| false
|
felipemontefuscolo/bitme
|
get_bitmex_candles.py
|
1
|
4122
|
#!/usr/bin/env python
import sys
import time
import swagger_client
from swagger_client.rest import ApiException
from utils.utils import smart_open
import argparse
import pandas as pd
MAX_NUM_CANDLES_BITMEX = 500
def print_file(file_or_stdout, api_instance, bin_size, partial, symbol, reverse, start_time, end_time):
chunks = split_in_chunks(start_time, end_time, MAX_NUM_CANDLES_BITMEX, bin_size)
with smart_open(file_or_stdout) as fh:
print("time,open,high,low,close,volume", file=fh)
num_pages = len(chunks)
for i in range(num_pages):
chunk = chunks[i]
s = chunk[0]
e = chunk[1]
count = (e - s) / pd.Timedelta(bin_size)
page = api_instance.trade_get_bucketed(
bin_size=bin_size,
partial=partial,
symbol=symbol,
count=count,
start=0.0,
reverse=reverse,
start_time=s,
end_time=e)
print("from {} to {}: {} candles downloaded".format(s, e, len(page)))
# TODO: bitmex has a bug where the high is not the highest value !!!!!
for line in reversed(page):
print(','.join([line.timestamp.strftime('%Y-%m-%dT%H:%M:%S'),
str(line.open),
str(max(line.high, line.open)),
str(min(line.low, line.open)),
str(line.close),
str(line.volume)]), file=fh)
sys.stdout.write(
"progress: completed %d out of %d pages (%.2f%%) \r" %
(i + 1, num_pages, 100 * float(i + 1) / num_pages))
sys.stdout.flush()
time.sleep(1.001)
print("")
def split_in_chunks(start: pd.Timedelta, end: pd.Timedelta, chunk_size: int, bucket_size: str):
i = start
r = []
dt = chunk_size * pd.Timedelta(bucket_size)
while i <= end:
r += [(i, min(end, i + dt))]
i += dt
return r
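# Worked illustration (assumes pandas Timestamps, as used elsewhere in this
# script): one day of '1m' buckets split into 500-candle chunks gives three
# (start, end) pairs of at most 8h20m each, e.g.
#   split_in_chunks(pd.Timestamp('2018-04-01'), pd.Timestamp('2018-04-02'), 500, '1m')
#   -> [(2018-04-01 00:00, 2018-04-01 08:20),
#       (2018-04-01 08:20, 2018-04-01 16:40),
#       (2018-04-01 16:40, 2018-04-02 00:00)]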
def get_args(args=None, namespace=None):
parser = argparse.ArgumentParser(description="Get bitmex data")
parser.add_argument('-b', '--begin-time', type=pd.Timestamp, required=True, help="Example: '2018-04-01T00:00:01'")
parser.add_argument('-e', '--end-time', type=pd.Timestamp, required=True, help="Example: '2018-04-02T00:00:01'")
parser.add_argument('-s', '--symbol', type=str, default='XBTUSD',
help='Instrument symbol. Send a bare series (e.g. XBU) to get data for the nearest expiring'
'contract in that series. You can also send a timeframe, e.g. `XBU:monthly`. '
'Timeframes are `daily`, `weekly`, `monthly`, `quarterly`, and `biquarterly`. (optional)')
parser.add_argument('-z', '--bin-size', choices=('1m', '5m', '1h', '1d'), default='1m', type=str,
help='Time interval to bucket by')
parser.add_argument('-o', '--file-or-stdout', type=str, required=True, help='Output filename or "-" for stdout')
parser.add_argument('--partial', action='store_true', default=False, )
args = parser.parse_args(args, namespace)
return args
def main():
args = get_args()
# create an instance of the API class
configuration = swagger_client.Configuration()
configuration.host = 'https://www.bitmex.com/api/v1'
api_instance = swagger_client.TradeApi(swagger_client.ApiClient(configuration))
    print("print to file " + (args.file_or_stdout if args.file_or_stdout != '-' else 'std output'))
try:
print_file(file_or_stdout=args.file_or_stdout,
api_instance=api_instance,
bin_size=args.bin_size, partial=args.partial, symbol=args.symbol,
reverse=False,
start_time=args.begin_time, end_time=args.end_time)
except ApiException as e:
print("Exception when calling TradeApi->trade_get_bucketed: %s\n" % e)
return 0
if __name__ == "__main__":
sys.exit(main())
|
mpl-2.0
| -6,723,906,315,588,768,000
| 35.157895
| 119
| 0.562591
| false
| 3.587467
| false
| false
| false
|
pengkobe/leetcode
|
questions/Regular_Expression_Matching.py
|
1
|
6170
|
# -*- coding: utf-8 -*-
# Difficulty: ★★★
# Implement a regular expression engine that supports matching . and *, where:
# . matches any single character
# * matches zero or more of the preceding character
# The whole input must be matched, not just part of it; the function signature is:
# bool isMatch(const char *s, const char *p)
# 如:
# isMatch('aa', 'a') // false
# isMatch('aa', 'aa') // true
# isMatch('aaa', 'aa') // false
# isMatch('aa', 'a*') // true
# isMatch('aa', '.*') // true
# isMatch('ab', '.*') // true
# isMatch('aab', 'c*a*b') // true
# Reference answer: https://github.com/barretlee/daily-algorithms/blob/master/answers/6.md
# Wrong Answer 1
# def isMatch(_str,patt):
# if not _str and not patt:
# return True;
# if not _str and not patt.replace("*",""):
# return True;
# if not _str or not patt:
# return False;
# This is where it deviates from the problem requirements
# if patt and patt[0]=="*":
# return isMatch(_str[1:],patt) or isMatch(_str,patt[1:]);
# else:
# return (_str[0]==patt[0] or patt[0] ==".") and isMatch(_str[1:],patt[1:]);
# if __name__ == '__main__':
# assert isMatch('aa', 'a') == False
# assert isMatch('aa', 'aa') == True
# assert isMatch('aaa', 'aaa') == True
# assert isMatch('aaa', '.a') == False
# assert isMatch('aa', '.*') == True
# assert isMatch('aab', '*') == True
# assert isMatch('b', '.*.') == False
# assert isMatch('aab', 'c*a*b') == True
# Backup of submitted solution 1
# class Solution(object):
# def isMatch(self, _str, patt):
# """
# :type s: str
# :type p: str
# :rtype: bool
# """
# if len(patt)==0:
# return len(_str)==0
# if len(patt)>1 and patt[1]=="*":
# i = 0;
# if len(_str) ==0:
# if self.isMatch(_str[0:],patt[2:]):
# return True;
# while i < len(_str):
# if i == 0 and self.isMatch(_str[0:],patt[2:]):
# return True;
# if _str[i] ==patt[0] or patt[0] ==".":
# if self.isMatch(_str[i+1:],patt[2:]):
# return True;
# else:
# break;
# i = i +1;
# return False;
# else:
# if _str and (_str[0]==patt[0] or patt[0] =="."):
# return self.isMatch(_str[1:],patt[1:]);
# else:
# return False;
# Solution 1
def isMatch2(_str,patt):
if len(patt)==0:
return len(_str)==0
if len(patt)>1 and patt[1]=="*":
i = 0;
if len(_str) ==0:
if isMatch2(_str[0:],patt[2:]):
return True;
while i < len(_str):
if i == 0 and isMatch2(_str[0:],patt[2:]):
return True;
if _str[i] == patt[0] or patt[0] ==".":
if isMatch2(_str[i+1:],patt[2:]):
return True;
else:
break;
i = i +1;
return False;
else:
print('else',_str[0:]);
if _str and (_str[0]==patt[0] or patt[0] =="."):
return isMatch2(_str[1:],patt[1:]);
else:
return False;
if __name__ == '__main__':
assert isMatch2('aa', 'a') == False
assert isMatch2('aa', 'aa') == True
assert isMatch2('aaa', 'aaa') == True
assert isMatch2('aaa', '.a') == False
assert isMatch2('ab', '.*') == True
assert isMatch2('aa', '.*') == True
assert isMatch2('b', '.*.') == True
assert isMatch2('aab', 'c*a*b') == True
assert isMatch2('aaba', 'ab*a*c*a') == False
assert isMatch2('a', '.*..a*') == False
assert isMatch2('a', 'ab*') == True
assert isMatch2('abcd', 'd*') == False
assert isMatch2('ab', '.*c') == False
## Solution 1, reference version
# def isMatch3( s, p):
# if len(p)==0:
# return len(s)==0
# if len(p)==1 or p[1]!='*':
# if len(s)==0 or (s[0]!=p[0] and p[0]!='.'):
# return False
# return isMatch3(s[1:],p[1:])
# else:
# i=-1;
# length=len(s)
# while i<length and (i<0 or p[0]=='.' or p[0]==s[i]):
# print(length,i+1,s[i+1:]);
# if isMatch3(s[i+1:],p[2:]):
# return True
# i+=1
# return False
## Dynamic programming solution
## Reasoning outline
# 1. Initialize everything to False first; the 2D array dp[i][j] records whether the first i characters of s match the first j characters of p
# 2. dp[0][0] = True: an empty string matched against an empty pattern is always True
# 3. When s is empty, handle the x* case; note that, per the problem, a * must be preceded by a non-* character
# 4. Run the actual DP, by cases on the current pattern character:
#    1. it is .
#    2. it is * (the tricky case)
#    3. it is an ordinary character
# @return a boolean
def isMatch4(s, p):
s_len = len(s);
p_len = len(p);
dp = [[False for j in range(p_len+1)] for i in range(s_len+1)];
dp[0][0] = True;
for i in range(2,p_len+1):
if p[i-1] == "*":
dp[0][i] = dp[0][i-2];
for i in range(1,s_len+1):
for j in range(1,p_len+1):
if p[j-1] == ".":
dp[i][j] = dp[i-1][j-1];
elif p[j-1] == "*":
                # Pitfall 1: p[i-2]=="."
                # Pitfall 2: dp[i-1][j-1] --> dp[i-1][j]
dp[i][j] = dp[i][j-1] or dp[i][j-2] or ((s[i-1] == p[j-2] or p[j-2]==".") and dp[i-1][j]);
else:
dp[i][j] = dp[i-1][j-1] and (s[i-1] == p[j -1]);
return dp[s_len][p_len];
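# Worked trace of the DP table above (illustrative only) for s = 'aa', p = 'a*',
# where dp[i][j] means "the first i chars of s match the first j chars of p":
#   dp[0] = [True,  False, True]   # '' vs '', 'a', 'a*'
#   dp[1] = [False, True,  True]   # 'a'
#   dp[2] = [False, False, True]   # 'aa' -> isMatch4('aa', 'a*') is True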
if __name__ == '__main__':
assert isMatch4('aa', 'a') == False
assert isMatch4('aa', 'aa') == True
assert isMatch4('aaa', '.a') == False
assert isMatch4('ab', '.*') == True
assert isMatch4('aa', '.*') == True
assert isMatch4('b', '.*.') == True
assert isMatch4('aab', 'c*a*b') == True
assert isMatch4('aaba', 'ab*a*c*a') == False
assert isMatch4('a', '.*..a*') == False
assert isMatch4('a', 'ab*') == True
assert isMatch4('abcd', 'd*') == False
assert isMatch4('ab', '.*c') == False
assert isMatch4('abc', 'a*c') == False
    # dp[i-1][j-1] --> dp[i-1][j], for example aa vs .
assert isMatch4('aaa', '.*') == True
|
gpl-3.0
| -772,837,806,286,783,600
| 29.136126
| 106
| 0.442669
| false
| 2.41443
| false
| false
| false
|
MisanthropicBit/bibpy
|
examples/requirements_check.py
|
1
|
1795
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Example of checking the requirements of bibtext and biblatex."""
import bibpy
from bibpy.tools import get_abspath_for
def format_requirements_check(required, optional):
s = ""
if required:
s = "required field(s) " + ", ".join(map(str, required))
if optional:
if required:
s += " and "
temp = ["/".join(map(str, opt)) for opt in optional]
s += "optional field(s) " + ", ".join(temp)
return s
if __name__ == '__main__':
filename = get_abspath_for(
__file__,
'../tests/data/biblatex_missing_requirements.bib'
)
entries = bibpy.read_file(filename, format='biblatex').entries
# Collect all results for which a requirements check failed into a list of
# pairs. There is also bibpy.requirements.check for checking individual
# entries
checked = bibpy.requirements.collect(entries, format='biblatex')
print("* Using bibpy.requirements.collect:")
for (entry, (required, optional)) in checked:
if required or optional:
# Either a missing required or optional field for this entry
print("{0}:{1} is missing {2}"
.format(entry.bibtype, entry.bibkey,
format_requirements_check(required, optional)))
# Requirements checks can also be performed on individual entries.
# Use Entry.validate(format) to throw a RequiredFieldError instead of
# returning a bool
entry = entries[2]
print()
print("* {0} for {1}:{2} = {3}".format("entry.valid('biblatex')",
entry.bibtype,
entry.bibkey,
entry.valid('biblatex')))
|
mit
| 8,251,867,451,367,847,000
| 31.636364
| 78
| 0.578273
| false
| 4.243499
| false
| false
| false
|
syci/ingadhoc-odoo-addons
|
partner_views_fields/res_config.py
|
1
|
1176
|
# -*- coding: utf-8 -*-
from openerp import fields, models
class partner_configuration(models.TransientModel):
_inherit = 'base.config.settings'
group_ref = fields.Boolean(
"Show Reference On Partners Tree View",
implied_group='partner_views_fields.group_ref',
)
group_user_id = fields.Boolean(
"Show Commercial On Partners Tree View",
implied_group='partner_views_fields.group_user_id',
)
group_city = fields.Boolean(
"Show City On Partners Tree and Search Views",
implied_group='partner_views_fields.group_city',
)
group_state_id = fields.Boolean(
"Show State On Partners Tree and Search Views",
implied_group='partner_views_fields.group_state_id',
)
group_country_id = fields.Boolean(
"Show Country On Partners Tree and Search Views",
implied_group='partner_views_fields.group_country_id',
)
group_function = fields.Boolean(
"Show Function On Partners Tree and Search Views",
implied_group='partner_views_fields.group_function',
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 5,898,531,089,247,119,000
| 34.636364
| 65
| 0.654762
| false
| 3.89404
| false
| false
| false
|
malt1/lutris
|
tests/test_installer.py
|
1
|
1700
|
from unittest import TestCase
from lutris.installer import ScriptInterpreter, ScriptingError
class MockInterpreter(ScriptInterpreter):
""" a script interpreter mock """
script = {'runner': 'linux'}
def is_valid(self):
return True
class TestScriptInterpreter(TestCase):
def test_script_with_correct_values_is_valid(self):
script = {
'runner': 'foo',
'installer': 'bar',
'name': 'baz',
'game_slug': 'baz',
}
interpreter = ScriptInterpreter(script, None)
self.assertFalse(interpreter.errors)
self.assertTrue(interpreter.is_valid())
def test_move_requires_src_and_dst(self):
script = {
'foo': 'bar',
'installer': {},
'name': 'missing_runner',
'game_slug': 'missing-runner'
}
with self.assertRaises(ScriptingError):
interpreter = ScriptInterpreter(script, None)
interpreter._get_move_paths({})
def test_get_command_returns_a_method(self):
interpreter = MockInterpreter({}, None)
command, params = interpreter._map_command({'move': 'whatever'})
self.assertIn("bound method MockInterpreter.move", str(command))
self.assertEqual(params, "whatever")
def test_get_command_doesnt_return_private_methods(self):
""" """
interpreter = MockInterpreter({}, None)
with self.assertRaises(ScriptingError) as ex:
command, params = interpreter._map_command(
{'_substitute': 'foo'}
)
self.assertEqual(ex.exception.message,
"The command substitute does not exists")
|
gpl-3.0
| -5,782,265,075,609,909,000
| 33
| 72
| 0.594118
| false
| 4.381443
| true
| false
| false
|
jossgray/zyrecffi
|
zyrecffi/_cffi.py
|
1
|
3231
|
from cffi import FFI
import os, sys
ffi = FFI()
ffi.cdef('''
// zsock.h
typedef struct _zsock_t zsock_t;
// zmsg.h
typedef struct _zmsg_t zmsg_t;
int zmsg_addstr (zmsg_t* self, const char* string);
char* zmsg_popstr (zmsg_t* self);
// zyre.h
typedef struct _zyre_t zyre_t;
zyre_t* zyre_new (const char *name);
void zyre_destroy (zyre_t **self_p);
const char* zyre_uuid (zyre_t *self);
const char *zyre_name (zyre_t *self);
void zyre_set_header (zyre_t *self, const char *name, const char *format, ...);
void zyre_set_verbose (zyre_t *self);
void zyre_set_port (zyre_t *self, int port_nbr);
void zyre_set_interval (zyre_t *self, size_t interval);
void zyre_set_interface (zyre_t *self, const char *value);
int zyre_set_endpoint (zyre_t *self, const char *format, ...);
void zyre_gossip_bind (zyre_t *self, const char *format, ...);
void zyre_gossip_connect (zyre_t *self, const char *format, ...);
int zyre_start (zyre_t *self);
void zyre_stop (zyre_t *self);
int zyre_join (zyre_t *self, const char *group);
int zyre_leave (zyre_t *self, const char *group);
zmsg_t* zyre_recv (zyre_t *self);
int zyre_whisper (zyre_t *self, const char *peer, zmsg_t **msg_p);
int zyre_shout (zyre_t *self, const char *group, zmsg_t **msg_p);
int zyre_whispers (zyre_t *self, const char *peer, const char *format, ...);
int zyre_shouts (zyre_t *self, const char *group, const char *format, ...);
zsock_t* zyre_socket (zyre_t *self);
void zyre_dump (zyre_t *self);
void zyre_version (int *major, int *minor, int *patch);
void zyre_test (bool verbose);
// zhash.h
typedef struct _zhash_t zhash_t;
// zyre_event.h
typedef struct _zyre_event_t zyre_event_t;
typedef enum {
ZYRE_EVENT_ENTER = 1,
ZYRE_EVENT_JOIN = 2,
ZYRE_EVENT_LEAVE = 3,
ZYRE_EVENT_EXIT = 4,
ZYRE_EVENT_WHISPER = 5,
ZYRE_EVENT_SHOUT = 6
} zyre_event_type_t;
zyre_event_t* zyre_event_new (zyre_t *self);
void zyre_event_destroy (zyre_event_t **self_p);
zyre_event_type_t zyre_event_type (zyre_event_t *self);
char * zyre_event_sender (zyre_event_t *self);
char * zyre_event_name (zyre_event_t *self);
char * zyre_event_address (zyre_event_t *self);
char * zyre_event_header (zyre_event_t *self, char *name);
char * zyre_event_group (zyre_event_t *self);
zmsg_t * zyre_event_msg (zyre_event_t *self);
zhash_t * zyre_event_headers (zyre_event_t *self);
// zsys.h
const char * zsys_interface ();
// zsock_option.h
int zsock_fd (zsock_t *self);
// zpoller.h
typedef struct _zpoller_t zpoller_t;
zpoller_t * zpoller_new (void *reader, ...);
void zpoller_destroy (zpoller_t **self_p);
void * zpoller_wait (zpoller_t *self, int timeout);
int zpoller_add (zpoller_t *self, void *reader);
''')
os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.abspath(os.path.join(os.path.dirname(__file__)))
_zyre_lib_name, _czmq_lib_name = 'zyre', 'czmq'
if sys.platform == 'win32':
_zyre_lib_name, _czmq_lib_name = 'zyre.dll', 'czmq.dll'
zyre_lib = ffi.dlopen(_zyre_lib_name)
czmq_lib = ffi.dlopen(_czmq_lib_name)
new_int_ptr = lambda val: ffi.new('int*', val)
new_void_ptr = lambda val: ffi.new('void*', val)
c_string_to_py = lambda s: ffi.string(s) if s else None
check_null = lambda val: val if val else None
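# --- Hedged usage sketch (not part of the original module) ---
# Assuming the zyre shared library actually loads on this machine, the cdefs
# above could be exercised roughly like this; the node/group names are made up.
#
# node = zyre_lib.zyre_new(b'node-1')
# zyre_lib.zyre_start(node)
# zyre_lib.zyre_join(node, b'GLOBAL')
# zyre_lib.zyre_shouts(node, b'GLOBAL', b'hello')
# zyre_lib.zyre_stop(node)
# zyre_lib.zyre_destroy(ffi.new('zyre_t **', node))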
|
gpl-3.0
| 5,003,882,334,878,248,000
| 21.444444
| 104
| 0.665738
| false
| 2.317791
| false
| false
| false
|
cwisecarver/osf.io
|
api/base/serializers.py
|
1
|
58011
|
import collections
import re
import furl
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from rest_framework import exceptions, permissions
from rest_framework import serializers as ser
from rest_framework.fields import SkipField
from rest_framework.fields import get_attribute as get_nested_attributes
from api.base import utils
from api.base.exceptions import InvalidQueryStringError
from api.base.exceptions import Conflict
from api.base.exceptions import JSONAPIException
from api.base.exceptions import TargetNotSupportedError
from api.base.exceptions import RelationshipPostMakesNoChanges
from api.base.settings import BULK_SETTINGS
from api.base.utils import absolute_reverse, extend_querystring_params, get_user_auth, extend_querystring_if_key_exists
from framework.auth import core as auth_core
from osf.models import AbstractNode as Node
from website import settings
from website import util as website_utils
from website.util.sanitize import strip_html
from website.project.model import has_anonymous_link
def format_relationship_links(related_link=None, self_link=None, rel_meta=None, self_meta=None):
"""
Properly handles formatting of self and related links according to JSON API.
Removes related or self link, if none.
"""
ret = {'links': {}}
if related_link:
ret['links'].update({
'related': {
'href': related_link or {},
'meta': rel_meta or {}
}
})
if self_link:
ret['links'].update({
'self': {
'href': self_link or {},
'meta': self_meta or {}
}
})
return ret
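# Illustrative output shape (not from the original source): with both links
# supplied, the returned dict looks roughly like
#   {'links': {'related': {'href': <related_link>, 'meta': {...}},
#              'self':    {'href': <self_link>,    'meta': {...}}}}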
def is_anonymized(request):
if hasattr(request, '_is_anonymized'):
return request._is_anonymized
private_key = request.query_params.get('view_only', None)
request._is_anonymized = website_utils.check_private_key_for_anonymized_link(private_key)
return request._is_anonymized
class ShowIfVersion(ser.Field):
"""
Skips the field if the specified request version is not after a feature's earliest supported version,
or not before the feature's latest supported version.
"""
def __init__(self, field, min_version, max_version, **kwargs):
super(ShowIfVersion, self).__init__(**kwargs)
self.field = field
self.required = field.required
self.read_only = field.read_only
self.min_version = min_version
self.max_version = max_version
self.help_text = 'This field is deprecated as of version {}'.format(self.max_version) or kwargs.get('help_text')
def get_attribute(self, instance):
request = self.context.get('request')
if request and utils.is_deprecated(request.version, self.min_version, self.max_version):
raise SkipField
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(ShowIfVersion, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
class HideIfRegistration(ser.Field):
"""
If node is a registration, this field will return None.
"""
def __init__(self, field, **kwargs):
super(HideIfRegistration, self).__init__(**kwargs)
self.field = field
self.source = field.source
self.required = field.required
self.read_only = field.read_only
def get_attribute(self, instance):
if instance.is_registration:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(HideIfRegistration, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
class HideIfDisabled(ser.Field):
"""
If the user is disabled, returns None for attribute fields, or skips
if a RelationshipField.
"""
def __init__(self, field, **kwargs):
super(HideIfDisabled, self).__init__(**kwargs)
self.field = field
self.source = field.source
self.required = field.required
self.read_only = field.read_only
def get_attribute(self, instance):
if instance.is_disabled:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(HideIfDisabled, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
class HideIfWithdrawal(HideIfRegistration):
"""
If registration is withdrawn, this field will return None.
"""
def get_attribute(self, instance):
if instance.is_retracted:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
class AllowMissing(ser.Field):
def __init__(self, field, **kwargs):
super(AllowMissing, self).__init__(**kwargs)
self.field = field
def to_representation(self, value):
return self.field.to_representation(value)
def bind(self, field_name, parent):
super(AllowMissing, self).bind(field_name, parent)
self.field.bind(field_name, self)
def get_attribute(self, instance):
"""
        Overwrite the error message to return a blank value if there is no existing value.
        This allows the display of keys that do not exist in the DB (GitHub on a new OSF account, for example).
"""
try:
return self.field.get_attribute(instance)
except SkipField:
return ''
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def _url_val(val, obj, serializer, request, **kwargs):
"""Function applied by `HyperlinksField` to get the correct value in the
schema.
"""
url = None
if isinstance(val, Link): # If a Link is passed, get the url value
url = val.resolve_url(obj, request)
elif isinstance(val, basestring): # if a string is passed, it's a method of the serializer
if getattr(serializer, 'field', None):
serializer = serializer.parent
url = getattr(serializer, val)(obj) if obj is not None else None
else:
url = val
if not url and url != 0:
raise SkipField
else:
return url
class DateByVersion(ser.DateTimeField):
"""
Custom DateTimeField that forces dates into the ISO-8601 format with timezone information in version 2.2.
"""
def to_representation(self, value):
request = self.context.get('request')
if request:
if request.version >= '2.2':
self.format = '%Y-%m-%dT%H:%M:%S.%fZ'
else:
self.format = '%Y-%m-%dT%H:%M:%S.%f' if value.microsecond else '%Y-%m-%dT%H:%M:%S'
return super(DateByVersion, self).to_representation(value)
class IDField(ser.CharField):
"""
ID field that validates that 'id' in the request body is the same as the instance 'id' for single requests.
"""
def __init__(self, **kwargs):
kwargs['label'] = 'ID'
super(IDField, self).__init__(**kwargs)
# Overrides CharField
def to_internal_value(self, data):
request = self.context.get('request')
if request:
if request.method in utils.UPDATE_METHODS and not utils.is_bulk_request(request):
id_field = self.get_id(self.root.instance)
if id_field != data:
raise Conflict(detail=('The id you used in the URL, "{}", does not match the id you used in the json body\'s id field, "{}". The object "{}" exists, otherwise you\'d get a 404, so most likely you need to change the id field to match.'.format(id_field, data, id_field)))
return super(IDField, self).to_internal_value(data)
def get_id(self, obj):
return getattr(obj, self.source, '_id')
class TypeField(ser.CharField):
"""
Type field that validates that 'type' in the request body is the same as the Meta type.
Also ensures that type is write-only and required.
"""
def __init__(self, **kwargs):
kwargs['write_only'] = True
kwargs['required'] = True
super(TypeField, self).__init__(**kwargs)
# Overrides CharField
def to_internal_value(self, data):
if isinstance(self.root, JSONAPIListSerializer):
type_ = self.root.child.Meta.type_
else:
type_ = self.root.Meta.type_
if type_ != data:
raise Conflict(detail=('This resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the resource\'s type.'.format(type_, data)))
return super(TypeField, self).to_internal_value(data)
class TargetTypeField(ser.CharField):
"""
Enforces that the related resource has the correct type
"""
def __init__(self, **kwargs):
kwargs['write_only'] = True
kwargs['required'] = True
self.target_type = kwargs.pop('target_type')
super(TargetTypeField, self).__init__(**kwargs)
def to_internal_value(self, data):
if self.target_type != data:
raise Conflict(detail=('The target resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the target resource\'s type.'.format(self.target_type, data)))
return super(TargetTypeField, self).to_internal_value(data)
class JSONAPIListField(ser.ListField):
def to_internal_value(self, data):
if not isinstance(data, list):
self.fail('not_a_list', input_type=type(data).__name__)
return super(JSONAPIListField, self).to_internal_value(data)
class AuthorizedCharField(ser.CharField):
"""
Passes auth of the logged-in user to the object's method
defined as the field source.
Example:
content = AuthorizedCharField(source='get_content')
"""
def __init__(self, source, **kwargs):
self.source = source
super(AuthorizedCharField, self).__init__(source=self.source, **kwargs)
def get_attribute(self, obj):
user = self.context['request'].user
auth = auth_core.Auth(user)
field_source_method = getattr(obj, self.source)
return field_source_method(auth=auth)
class AnonymizedRegexField(AuthorizedCharField):
"""
Performs a regex replace on the content of the authorized object's
source field when an anonymous view is requested.
Example:
content = AnonymizedRegexField(source='get_content', regex='\[@[^\]]*\]\([^\) ]*\)', replace='@A User')
"""
def __init__(self, source, regex, replace, **kwargs):
self.source = source
self.regex = regex
self.replace = replace
super(AnonymizedRegexField, self).__init__(source=self.source, **kwargs)
def get_attribute(self, obj):
value = super(AnonymizedRegexField, self).get_attribute(obj)
if value:
user = self.context['request'].user
auth = auth_core.Auth(user)
if 'view_only' in self.context['request'].query_params:
auth.private_key = self.context['request'].query_params['view_only']
if has_anonymous_link(obj.node, auth):
value = re.sub(self.regex, self.replace, value)
return value
class RelationshipField(ser.HyperlinkedIdentityField):
"""
RelationshipField that permits the return of both self and related links, along with optional
meta information. ::
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<_id>'},
self_view='nodes:node-node-children-relationship',
self_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_count'}
)
The lookup field must be surrounded in angular brackets to find the attribute on the target. Otherwise, the lookup
field will be returned verbatim. ::
wiki_home = RelationshipField(
related_view='addon:addon-detail',
related_view_kwargs={'node_id': '<_id>', 'provider': 'wiki'},
)
'_id' is enclosed in angular brackets, but 'wiki' is not. 'id' will be looked up on the target, but 'wiki' will not.
The serialized result would be '/nodes/abc12/addons/wiki'.
Field can handle nested attributes: ::
wiki_home = RelationshipField(
related_view='wiki:wiki-detail',
related_view_kwargs={'node_id': '<_id>', 'wiki_id': '<wiki_pages_current.home>'}
)
    Field can handle a filter_key, which operates as the source field (but
    is named differently to not interfere with HyperlinkedIdentityField's source).
The ``filter_key`` argument defines the Mongo key (or ODM field name) to filter on
when using the ``FilterMixin`` on a view. ::
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
Field can include optional filters:
Example:
replies = RelationshipField(
self_view='nodes:node-comments',
self_view_kwargs={'node_id': '<node._id>'},
filter={'target': '<_id>'})
)
"""
json_api_link = True # serializes to a links object
def __init__(self, related_view=None, related_view_kwargs=None, self_view=None, self_view_kwargs=None,
self_meta=None, related_meta=None, always_embed=False, filter=None, filter_key=None, **kwargs):
related_view = related_view
self_view = self_view
related_kwargs = related_view_kwargs
self_kwargs = self_view_kwargs
self.views = {'related': related_view, 'self': self_view}
self.view_kwargs = {'related': related_kwargs, 'self': self_kwargs}
self.related_meta = related_meta
self.self_meta = self_meta
self.always_embed = always_embed
self.filter = filter
self.filter_key = filter_key
assert (related_view is not None or self_view is not None), 'Self or related view must be specified.'
if related_view:
assert related_kwargs is not None, 'Must provide related view kwargs.'
if not callable(related_kwargs):
assert isinstance(related_kwargs,
dict), "Related view kwargs must have format {'lookup_url_kwarg: lookup_field}."
if self_view:
assert self_kwargs is not None, 'Must provide self view kwargs.'
assert isinstance(self_kwargs, dict), "Self view kwargs must have format {'lookup_url_kwarg: lookup_field}."
view_name = related_view
if view_name:
lookup_kwargs = related_kwargs
else:
view_name = self_view
lookup_kwargs = self_kwargs
if kwargs.get('lookup_url_kwarg', None):
lookup_kwargs = kwargs.pop('lookup_url_kwarg')
super(RelationshipField, self).__init__(view_name, lookup_url_kwarg=lookup_kwargs, **kwargs)
# Allow a RelationshipField to be modified if explicitly set so
if kwargs.get('read_only') is not None:
self.read_only = kwargs['read_only']
def resolve(self, resource, field_name, request):
"""
Resolves the view when embedding.
"""
lookup_url_kwarg = self.lookup_url_kwarg
if callable(lookup_url_kwarg):
lookup_url_kwarg = lookup_url_kwarg(getattr(resource, field_name))
kwargs = {attr_name: self.lookup_attribute(resource, attr) for (attr_name, attr) in lookup_url_kwarg.items()}
kwargs.update({'version': request.parser_context['kwargs']['version']})
view = self.view_name
if callable(self.view_name):
view = view(getattr(resource, field_name))
return resolve(
reverse(
view,
kwargs=kwargs
)
)
def process_related_counts_parameters(self, params, value):
"""
Processes related_counts parameter.
Can either be a True/False value for fetching counts on all fields, or a comma-separated list for specifying
individual fields. Ensures field for which we are requesting counts is a relationship field.
"""
if utils.is_truthy(params) or utils.is_falsy(params):
return params
field_counts_requested = [val for val in params.split(',')]
countable_fields = {field for field in self.parent.fields if
getattr(self.parent.fields[field], 'json_api_link', False) or
getattr(getattr(self.parent.fields[field], 'field', None), 'json_api_link', None)}
for count_field in field_counts_requested:
# Some fields will hide relationships, e.g. HideIfWithdrawal
# Ignore related_counts for these fields
fetched_field = self.parent.fields.get(count_field)
hidden = fetched_field and isinstance(fetched_field, HideIfWithdrawal) and getattr(value, 'is_retracted', False)
if not hidden and count_field not in countable_fields:
raise InvalidQueryStringError(
detail="Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got '{0}'".format(
params),
parameter='related_counts'
)
return field_counts_requested
def get_meta_information(self, meta_data, value):
"""
For retrieving meta values, otherwise returns {}
"""
meta = {}
for key in meta_data or {}:
if key == 'count' or key == 'unread':
show_related_counts = self.context['request'].query_params.get('related_counts', False)
if self.context['request'].parser_context.get('kwargs'):
if self.context['request'].parser_context['kwargs'].get('is_embedded'):
show_related_counts = False
field_counts_requested = self.process_related_counts_parameters(show_related_counts, value)
if utils.is_truthy(show_related_counts):
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
elif utils.is_falsy(show_related_counts):
continue
elif self.field_name in field_counts_requested:
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
else:
continue
elif key == 'projects_in_common':
if not get_user_auth(self.context['request']).user:
continue
if not self.context['request'].query_params.get('show_projects_in_common', False):
continue
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
else:
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
return meta
def lookup_attribute(self, obj, lookup_field):
"""
Returns attribute from target object unless attribute surrounded in angular brackets where it returns the lookup field.
Also handles the lookup of nested attributes.
"""
bracket_check = _tpl(lookup_field)
if bracket_check:
source_attrs = bracket_check.split('.')
# If you are using a nested attribute for lookup, and you get the attribute wrong, you will not get an
# error message, you will just not see that field. This allows us to have slightly more dynamic use of
# nested attributes in relationship fields.
try:
return_val = get_nested_attributes(obj, source_attrs)
except KeyError:
return None
return return_val
return lookup_field
def kwargs_lookup(self, obj, kwargs_dict):
"""
For returning kwargs dictionary of format {"lookup_url_kwarg": lookup_value}
"""
if callable(kwargs_dict):
kwargs_dict = kwargs_dict(obj)
kwargs_retrieval = {}
for lookup_url_kwarg, lookup_field in kwargs_dict.items():
try:
lookup_value = self.lookup_attribute(obj, lookup_field)
except AttributeError as exc:
raise AssertionError(exc)
if lookup_value is None:
return None
kwargs_retrieval[lookup_url_kwarg] = lookup_value
return kwargs_retrieval
# Overrides HyperlinkedIdentityField
def get_url(self, obj, view_name, request, format):
urls = {}
for view_name, view in self.views.items():
if view is None:
urls[view_name] = {}
else:
kwargs = self.kwargs_lookup(obj, self.view_kwargs[view_name])
if kwargs is None:
urls[view_name] = {}
else:
if callable(view):
view = view(getattr(obj, self.field_name))
kwargs.update({'version': request.parser_context['kwargs']['version']})
url = self.reverse(view, kwargs=kwargs, request=request, format=format)
if self.filter:
formatted_filters = self.format_filter(obj)
if formatted_filters:
for filter in formatted_filters:
url = extend_querystring_params(
url,
{'filter[{}]'.format(filter['field_name']): filter['value']}
)
else:
url = None
url = extend_querystring_if_key_exists(url, self.context['request'], 'view_only')
urls[view_name] = url
if not urls['self'] and not urls['related']:
urls = None
return urls
def to_esi_representation(self, value, envelope='data'):
relationships = self.to_representation(value)
try:
href = relationships['links']['related']['href']
except KeyError:
raise SkipField
else:
if href and not href == '{}':
if self.always_embed:
envelope = 'data'
query_dict = dict(format=['jsonapi', ], envelope=[envelope, ])
if 'view_only' in self.parent.context['request'].query_params.keys():
query_dict.update(view_only=[self.parent.context['request'].query_params['view_only']])
esi_url = extend_querystring_params(href, query_dict)
return '<esi:include src="{}"/>'.format(esi_url)
def format_filter(self, obj):
""" Take filters specified in self.filter and format them in a way that can be easily parametrized
:param obj: RelationshipField object
:return: list of dictionaries with 'field_name' and 'value' for each filter
"""
filter_fields = self.filter.keys()
filters = []
for field_name in filter_fields:
try:
# check if serializer method passed in
serializer_method = getattr(self.parent, self.filter[field_name])
except AttributeError:
value = self.lookup_attribute(obj, self.filter[field_name])
else:
value = serializer_method(obj)
if not value:
continue
filters.append({'field_name': field_name, 'value': value})
return filters if filters else None
# Overrides HyperlinkedIdentityField
def to_representation(self, value):
request = self.context.get('request', None)
format = self.context.get('format', None)
assert request is not None, (
'`%s` requires the request in the serializer'
" context. Add `context={'request': request}` when instantiating "
'the serializer.' % self.__class__.__name__
)
# By default use whatever format is given for the current context
# unless the target is a different type to the source.
#
# Eg. Consider a HyperlinkedIdentityField pointing from a json
# representation to an html property of that representation...
#
# '/snippets/1/' should link to '/snippets/1/highlight/'
# ...but...
# '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
if format and self.format and self.format != format:
format = self.format
# Return the hyperlink, or error if incorrectly configured.
try:
url = self.get_url(value, self.view_name, request, format)
except NoReverseMatch:
msg = (
'Could not resolve URL for hyperlinked relationship using '
'view name "%s". You may have failed to include the related '
'model in your API, or incorrectly configured the '
'`lookup_field` attribute on this field.'
)
if value in ('', None):
value_string = {'': 'the empty string', None: 'None'}[value]
msg += (
' WARNING: The value of the field on the model instance '
"was %s, which may be why it didn't match any "
'entries in your URL conf.' % value_string
)
raise ImproperlyConfigured(msg % self.view_name)
if url is None:
raise SkipField
related_url = url['related']
related_meta = self.get_meta_information(self.related_meta, value)
self_url = url['self']
self_meta = self.get_meta_information(self.self_meta, value)
return format_relationship_links(related_url, self_url, related_meta, self_meta)
class FileCommentRelationshipField(RelationshipField):
def get_url(self, obj, view_name, request, format):
if obj.kind == 'folder':
raise SkipField
return super(FileCommentRelationshipField, self).get_url(obj, view_name, request, format)
class TargetField(ser.Field):
"""
Field that returns a nested dict with the url (constructed based
on the object's type), optional meta information, and link_type.
Example:
target = TargetField(link_type='related', meta={'type': 'get_target_type'})
"""
json_api_link = True # serializes to a links object
view_map = {
'node': {
'view': 'nodes:node-detail',
'lookup_kwarg': 'node_id'
},
'comment': {
'view': 'comments:comment-detail',
'lookup_kwarg': 'comment_id'
},
'nodewikipage': {
'view': None,
'lookup_kwarg': None
}
}
def __init__(self, **kwargs):
self.meta = kwargs.pop('meta', {})
self.link_type = kwargs.pop('link_type', 'url')
super(TargetField, self).__init__(read_only=True, **kwargs)
def resolve(self, resource, field_name, request):
"""
Resolves the view for target node or target comment when embedding.
"""
view_info = self.view_map.get(resource.target.referent._name, None)
if not view_info:
raise TargetNotSupportedError('{} is not a supported target type'.format(
resource.target._name
))
if not view_info['view']:
return None, None, None
embed_value = resource.target._id
return resolve(
reverse(
view_info['view'],
kwargs={
view_info['lookup_kwarg']: embed_value,
'version': request.parser_context['kwargs']['version']
}
)
)
def to_esi_representation(self, value, envelope='data'):
href = value.get_absolute_url()
if href:
esi_url = extend_querystring_params(href, dict(envelope=[envelope, ], format=['jsonapi', ]))
return '<esi:include src="{}"/>'.format(esi_url)
return self.to_representation(value)
def to_representation(self, value):
"""
Returns nested dictionary in format {'links': {'self.link_type': ... }
If no meta information, self.link_type is equal to a string containing link's URL. Otherwise,
the link is represented as a links object with 'href' and 'meta' members.
"""
meta = website_utils.rapply(self.meta, _url_val, obj=value, serializer=self.parent, request=self.context['request'])
return {'links': {self.link_type: {'href': value.referent.get_absolute_url(), 'meta': meta}}}
class LinksField(ser.Field):
"""Links field that resolves to a links object. Used in conjunction with `Link`.
If the object to be serialized implements `get_absolute_url`, then the return value
of that method is used for the `self` link.
Example: ::
links = LinksField({
'html': 'absolute_url',
'children': {
'related': Link('nodes:node-children', node_id='<_id>'),
'count': 'get_node_count'
},
'contributors': {
'related': Link('nodes:node-contributors', node_id='<_id>'),
'count': 'get_contrib_count'
},
'registrations': {
'related': Link('nodes:node-registrations', node_id='<_id>'),
'count': 'get_registration_count'
},
})
"""
def __init__(self, links, *args, **kwargs):
ser.Field.__init__(self, read_only=True, *args, **kwargs)
self.links = links
def get_attribute(self, obj):
# We pass the object instance onto `to_representation`,
# not just the field attribute.
return obj
def extend_absolute_url(self, obj):
return extend_querystring_if_key_exists(obj.get_absolute_url(), self.context['request'], 'view_only')
def to_representation(self, obj):
ret = {}
for name, value in self.links.iteritems():
try:
url = _url_val(value, obj=obj, serializer=self.parent, request=self.context['request'])
except SkipField:
continue
else:
ret[name] = url
if hasattr(obj, 'get_absolute_url') and 'self' not in self.links:
ret['self'] = self.extend_absolute_url(obj)
return ret
class ListDictField(ser.DictField):
def __init__(self, **kwargs):
super(ListDictField, self).__init__(**kwargs)
def to_representation(self, value):
"""
Ensure the value of each key in the Dict to be a list
"""
res = {}
for key, val in value.items():
if isinstance(self.child.to_representation(val), list):
res[six.text_type(key)] = self.child.to_representation(val)
else:
if self.child.to_representation(val):
res[six.text_type(key)] = [self.child.to_representation(val)]
else:
res[six.text_type(key)] = []
return res
_tpl_pattern = re.compile(r'\s*<\s*(\S*)\s*>\s*')
def _tpl(val):
"""Return value within ``< >`` if possible, else return ``None``."""
match = _tpl_pattern.match(val)
if match:
return match.groups()[0]
return None
def _get_attr_from_tpl(attr_tpl, obj):
attr_name = _tpl(str(attr_tpl))
if attr_name:
attribute_value = obj
for attr_segment in attr_name.split('.'):
attribute_value = getattr(attribute_value, attr_segment, ser.empty)
if attribute_value is not ser.empty:
return attribute_value
elif attr_name in obj:
return obj[attr_name]
else:
raise AttributeError(
'{attr_name!r} is not a valid '
'attribute of {obj!r}'.format(
attr_name=attr_name, obj=obj,
))
else:
return attr_tpl
# TODO: Make this a Field that is usable on its own?
class Link(object):
"""Link object to use in conjunction with Links field. Does reverse lookup of
URLs given an endpoint name and attributed enclosed in `<>`. This includes
complex key strings like 'user.id'
"""
def __init__(self, endpoint, args=None, kwargs=None, query_kwargs=None, **kw):
self.endpoint = endpoint
self.kwargs = kwargs or {}
self.args = args or tuple()
self.reverse_kwargs = kw
self.query_kwargs = query_kwargs or {}
def resolve_url(self, obj, request):
kwarg_values = {key: _get_attr_from_tpl(attr_tpl, obj) for key, attr_tpl in self.kwargs.items()}
kwarg_values.update({'version': request.parser_context['kwargs']['version']})
arg_values = [_get_attr_from_tpl(attr_tpl, obj) for attr_tpl in self.args]
query_kwarg_values = {key: _get_attr_from_tpl(attr_tpl, obj) for key, attr_tpl in self.query_kwargs.items()}
        # Presumably, if you are expecting a value but the value is empty, then the link is invalid.
for item in kwarg_values:
if kwarg_values[item] is None:
raise SkipField
return utils.absolute_reverse(
self.endpoint,
args=arg_values,
kwargs=kwarg_values,
query_kwargs=query_kwarg_values,
**self.reverse_kwargs
)
class WaterbutlerLink(Link):
"""Link object to use in conjunction with Links field. Builds a Waterbutler URL for files.
"""
def __init__(self, must_be_file=None, must_be_folder=None, **kwargs):
self.kwargs = kwargs
self.must_be_file = must_be_file
self.must_be_folder = must_be_folder
def resolve_url(self, obj, request):
"""Reverse URL lookup for WaterButler routes
"""
if self.must_be_folder is True and not obj.path.endswith('/'):
raise SkipField
if self.must_be_file is True and obj.path.endswith('/'):
raise SkipField
url = website_utils.waterbutler_api_url_for(obj.node._id, obj.provider, obj.path, **self.kwargs)
if not url:
raise SkipField
else:
return url
class NodeFileHyperLinkField(RelationshipField):
def __init__(self, kind=None, never_embed=False, **kws):
self.kind = kind
self.never_embed = never_embed
super(NodeFileHyperLinkField, self).__init__(**kws)
def get_url(self, obj, view_name, request, format):
if self.kind and obj.kind != self.kind:
raise SkipField
return super(NodeFileHyperLinkField, self).get_url(obj, view_name, request, format)
class JSONAPIListSerializer(ser.ListSerializer):
def to_representation(self, data):
enable_esi = self.context.get('enable_esi', False)
envelope = self.context.update({'envelope': None})
# Don't envelope when serializing collection
errors = {}
bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
if isinstance(data, collections.Mapping):
errors = data.get('errors', None)
data = data.get('data', None)
if enable_esi:
ret = [
self.child.to_esi_representation(item, envelope=None) for item in data
]
else:
ret = [
self.child.to_representation(item, envelope=envelope) for item in data
]
if errors and bulk_skip_uneditable:
ret.append({'errors': errors})
return ret
# Overrides ListSerializer which doesn't support multiple update by default
def update(self, instance, validated_data):
# avoiding circular import
from api.nodes.serializers import ContributorIDField
# if PATCH request, the child serializer's partial attribute needs to be True
if self.context['request'].method == 'PATCH':
self.child.partial = True
bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
if not bulk_skip_uneditable:
if len(instance) != len(validated_data):
raise exceptions.ValidationError({'non_field_errors': 'Could not find all objects to update.'})
id_lookup = self.child.fields['id'].source
data_mapping = {item.get(id_lookup): item for item in validated_data}
if isinstance(self.child.fields['id'], ContributorIDField):
instance_mapping = {self.child.fields['id'].get_id(item): item for item in instance}
else:
instance_mapping = {getattr(item, id_lookup): item for item in instance}
ret = {'data': []}
for resource_id, resource in instance_mapping.items():
data = data_mapping.pop(resource_id, None)
ret['data'].append(self.child.update(resource, data))
# If skip_uneditable in request, add validated_data for nodes in which the user did not have edit permissions to errors
if data_mapping and bulk_skip_uneditable:
ret.update({'errors': data_mapping.values()})
return ret
# overrides ListSerializer
def run_validation(self, data):
meta = getattr(self, 'Meta', None)
bulk_limit = getattr(meta, 'bulk_limit', BULK_SETTINGS['DEFAULT_BULK_LIMIT'])
num_items = len(data)
if num_items > bulk_limit:
raise JSONAPIException(source={'pointer': '/data'},
detail='Bulk operation limit is {}, got {}.'.format(bulk_limit, num_items))
return super(JSONAPIListSerializer, self).run_validation(data)
# overrides ListSerializer: Add HTML-sanitization similar to that used by APIv1 front-end views
def is_valid(self, clean_html=True, **kwargs):
"""
After validation, scrub HTML from validated_data prior to saving (for create and update views)
Exclude 'type' from validated_data.
"""
ret = super(JSONAPIListSerializer, self).is_valid(**kwargs)
if clean_html is True:
self._validated_data = website_utils.rapply(self.validated_data, strip_html)
for data in self._validated_data:
data.pop('type', None)
return ret
class SparseFieldsetMixin(object):
def parse_sparse_fields(self, allow_unsafe=False, **kwargs):
request = kwargs.get('context', {}).get('request', None)
if request and (allow_unsafe or request.method in permissions.SAFE_METHODS):
sparse_fieldset_query_param = 'fields[{}]'.format(self.Meta.type_)
if sparse_fieldset_query_param in request.query_params:
fieldset = request.query_params[sparse_fieldset_query_param].split(',')
for field_name in self.fields.fields.copy().keys():
if field_name in ('id', 'links', 'type'):
# MUST return these fields
continue
if field_name not in fieldset:
self.fields.pop(field_name)
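# --- Editor's note: added illustration, not part of the original module. ---
# A minimal sketch of how the sparse-fieldset query parameter handled above is
# interpreted: a request such as ?fields[nodes]=title,category keeps only the
# requested attributes plus the mandatory 'id', 'links' and 'type' keys, and
# every other declared field is popped from the serializer. The field names
# below are hypothetical.
def _sparse_fieldset_example():
    declared_fields = ['id', 'links', 'type', 'title', 'category', 'date_created']
    requested = 'title,category'.split(',')  # value of fields[nodes]
    kept = [name for name in declared_fields
            if name in ('id', 'links', 'type') or name in requested]
    assert kept == ['id', 'links', 'type', 'title', 'category']
    return kept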
class BaseAPISerializer(ser.Serializer, SparseFieldsetMixin):
def __init__(self, *args, **kwargs):
self.parse_sparse_fields(**kwargs)
super(BaseAPISerializer, self).__init__(*args, **kwargs)
self.model_field_names = [name if field.source == '*' else field.source
for name, field in self.fields.iteritems()]
class JSONAPISerializer(BaseAPISerializer):
"""Base serializer. Requires that a `type_` option is set on `class Meta`. Also
allows for enveloping of both single resources and collections. Looks to nest fields
according to JSON API spec. Relational fields must set json_api_link=True flag.
Self/html links must be nested under "links".
"""
# Don't serialize relationships that use these views
# when viewing thru an anonymous VOL
views_to_hide_if_anonymous = {
'users:user-detail',
'nodes:node-registrations',
}
# overrides Serializer
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls(*args, **kwargs)
return JSONAPIListSerializer(*args, **kwargs)
def invalid_embeds(self, fields, embeds):
fields_check = fields[:]
for index, field in enumerate(fields_check):
if getattr(field, 'field', None):
fields_check[index] = field.field
invalid_embeds = set(embeds.keys()) - set(
[f.field_name for f in fields_check if getattr(f, 'json_api_link', False)])
return invalid_embeds
def to_esi_representation(self, data, envelope='data'):
href = None
query_params_blacklist = ['page[size]']
href = self.get_absolute_url(data)
if href and href != '{}':
esi_url = furl.furl(href).add(args=dict(self.context['request'].query_params)).remove(
args=query_params_blacklist).remove(args=['envelope']).add(args={'envelope': envelope}).url
return '<esi:include src="{}"/>'.format(esi_url)
# failsafe, let python do it if something bad happened in the ESI construction
return super(JSONAPISerializer, self).to_representation(data)
# overrides Serializer
def to_representation(self, obj, envelope='data'):
"""Serialize to final representation.
:param obj: Object to be serialized.
:param envelope: Key for resource object.
"""
ret = {}
meta = getattr(self, 'Meta', None)
type_ = getattr(meta, 'type_', None)
assert type_ is not None, 'Must define Meta.type_'
self.parse_sparse_fields(allow_unsafe=True, context=self.context)
data = {
'id': '',
'type': type_,
'attributes': {},
'relationships': {},
'embeds': {},
'links': {},
}
embeds = self.context.get('embed', {})
context_envelope = self.context.get('envelope', envelope)
if context_envelope == 'None':
context_envelope = None
enable_esi = self.context.get('enable_esi', False)
is_anonymous = is_anonymized(self.context['request'])
to_be_removed = set()
if is_anonymous and hasattr(self, 'non_anonymized_fields'):
# Drop any fields that are not specified in the `non_anonymized_fields` variable.
allowed = set(self.non_anonymized_fields)
existing = set(self.fields.keys())
to_be_removed = existing - allowed
fields = [field for field in self.fields.values() if
not field.write_only and field.field_name not in to_be_removed]
invalid_embeds = self.invalid_embeds(fields, embeds)
invalid_embeds = invalid_embeds - to_be_removed
if invalid_embeds:
raise InvalidQueryStringError(parameter='embed',
detail='The following fields are not embeddable: {}'.format(
', '.join(invalid_embeds)))
for field in fields:
try:
attribute = field.get_attribute(obj)
except SkipField:
continue
nested_field = getattr(field, 'field', None)
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
data['attributes'][field.field_name] = None
else:
try:
if hasattr(attribute, 'all'):
representation = field.to_representation(attribute.all())
else:
representation = field.to_representation(attribute)
except SkipField:
continue
if getattr(field, 'json_api_link', False) or getattr(nested_field, 'json_api_link', False):
# If embed=field_name is appended to the query string or 'always_embed' flag is True, directly embed the
# results in addition to adding a relationship link
if embeds and (field.field_name in embeds or getattr(field, 'always_embed', None)):
if enable_esi:
try:
result = field.to_esi_representation(attribute, envelope=envelope)
except SkipField:
continue
else:
try:
# If a field has an empty representation, it should not be embedded.
result = self.context['embed'][field.field_name](obj)
except SkipField:
result = None
if result:
data['embeds'][field.field_name] = result
else:
data['embeds'][field.field_name] = {'error': 'This field is not embeddable.'}
try:
if not (is_anonymous and
hasattr(field, 'view_name') and
field.view_name in self.views_to_hide_if_anonymous):
data['relationships'][field.field_name] = representation
except SkipField:
continue
elif field.field_name == 'id':
data['id'] = representation
elif field.field_name == 'links':
data['links'] = representation
else:
data['attributes'][field.field_name] = representation
if not data['relationships']:
del data['relationships']
if not data['embeds']:
del data['embeds']
if context_envelope:
ret[context_envelope] = data
if is_anonymous:
ret['meta'] = {'anonymous': True}
else:
ret = data
return ret
def get_absolute_url(self, obj):
raise NotImplementedError()
def get_absolute_html_url(self, obj):
return extend_querystring_if_key_exists(obj.absolute_url, self.context['request'], 'view_only')
# overrides Serializer: Add HTML-sanitization similar to that used by APIv1 front-end views
def is_valid(self, clean_html=True, **kwargs):
"""
After validation, scrub HTML from validated_data prior to saving (for create and update views)
Exclude 'type' and '_id' from validated_data.
"""
ret = super(JSONAPISerializer, self).is_valid(**kwargs)
if clean_html is True:
self._validated_data = self.sanitize_data()
self._validated_data.pop('type', None)
self._validated_data.pop('target_type', None)
if self.context['request'].method in utils.UPDATE_METHODS:
self._validated_data.pop('_id', None)
return ret
def sanitize_data(self):
return website_utils.rapply(self.validated_data, strip_html)
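# --- Editor's note: added illustration, not part of the original module. ---
# A sketch of the document shape JSONAPISerializer.to_representation() builds
# for a single resource before DRF renders it to JSON: the resource object is
# nested under the envelope key ('data' by default), and the 'relationships'
# and 'embeds' keys are dropped when empty. All ids, types and values below
# are hypothetical.
_EXAMPLE_JSONAPI_DOCUMENT = {
    'data': {
        'id': 'abc12',
        'type': 'nodes',
        'attributes': {'title': 'My project'},
        'relationships': {'children': {'links': {'related': {'href': '...'}}}},
        'links': {'self': 'https://api.example.org/v2/nodes/abc12/'},
    },
}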
class JSONAPIRelationshipSerializer(BaseAPISerializer):
"""Base Relationship serializer. Requires that a `type_` option is set on `class Meta`.
Provides a simplified serialization of the relationship, allowing for simple update request
bodies.
"""
id = ser.CharField(required=False, allow_null=True)
type = TypeField(required=False, allow_null=True)
def to_representation(self, obj):
meta = getattr(self, 'Meta', None)
type_ = getattr(meta, 'type_', None)
assert type_ is not None, 'Must define Meta.type_'
relation_id_field = self.fields['id']
attribute = relation_id_field.get_attribute(obj)
relationship = relation_id_field.to_representation(attribute)
data = {'type': type_, 'id': relationship} if relationship else None
return data
def DevOnly(field):
"""Make a field only active in ``DEV_MODE``. ::
        experimental_field = DevOnly(CharField(required=False))
"""
return field if settings.DEV_MODE else None
class RestrictedDictSerializer(ser.Serializer):
def to_representation(self, obj):
data = {}
fields = [field for field in self.fields.values() if not field.write_only]
for field in fields:
try:
attribute = field.get_attribute(obj)
except ser.SkipField:
continue
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
data[field.field_name] = None
else:
data[field.field_name] = field.to_representation(attribute)
return data
def relationship_diff(current_items, new_items):
"""
To be used in POST and PUT/PATCH relationship requests, as, by JSON API specs,
in update requests, the 'remove' items' relationships would be deleted, and the
'add' would be added, while for create requests, only the 'add' would be added.
:param current_items: The current items in the relationship
:param new_items: The items passed in the request
:return:
"""
return {
'add': {k: new_items[k] for k in (set(new_items.keys()) - set(current_items.keys()))},
'remove': {k: current_items[k] for k in (set(current_items.keys()) - set(new_items.keys()))}
}
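# --- Editor's note: added illustration, not part of the original module. ---
# A small worked example of relationship_diff() above: keys that appear only
# in new_items end up under 'add', keys that appear only in current_items end
# up under 'remove', and keys present in both are left untouched. The ids are
# hypothetical.
def _relationship_diff_example():
    current = {'abc12': 'pointer-abc12', 'def34': 'pointer-def34'}
    new = {'def34': 'pointer-def34', 'ghi56': 'pointer-ghi56'}
    diff = relationship_diff(current, new)
    assert diff['add'] == {'ghi56': 'pointer-ghi56'}
    assert diff['remove'] == {'abc12': 'pointer-abc12'}
    return diff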
class AddonAccountSerializer(JSONAPISerializer):
id = ser.CharField(source='_id', read_only=True)
provider = ser.CharField(read_only=True)
profile_url = ser.CharField(required=False, read_only=True)
display_name = ser.CharField(required=False, read_only=True)
    links = LinksField({
'self': 'get_absolute_url',
})
class Meta:
type_ = 'external_accounts'
def get_absolute_url(self, obj):
kwargs = self.context['request'].parser_context['kwargs']
kwargs.update({'account_id': obj._id})
return absolute_reverse(
'users:user-external_account-detail',
kwargs=kwargs
)
class LinkedNode(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'linked_nodes'
class LinkedRegistration(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'linked_registrations'
class LinkedNodesRelationshipSerializer(BaseAPISerializer):
data = ser.ListField(child=LinkedNode())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].linked_nodes_self_url
def get_related_url(self, obj):
return obj['self'].linked_nodes_related_url
class Meta:
type_ = 'linked_nodes'
def get_pointers_to_add_remove(self, pointers, new_pointers):
diff = relationship_diff(
current_items={pointer._id: pointer for pointer in pointers},
new_items={val['_id']: val for val in new_pointers}
)
nodes_to_add = []
for node_id in diff['add']:
node = Node.load(node_id)
if not node:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
nodes_to_add.append(node)
return nodes_to_add, diff['remove'].values()
def make_instance_obj(self, obj):
# Convenience method to format instance based on view's get_object
return {'data': [
pointer for pointer in
obj.linked_nodes.filter(is_deleted=False, type='osf.node')
], 'self': obj}
def update(self, instance, validated_data):
collection = instance['self']
auth = utils.get_user_auth(self.context['request'])
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
for pointer in remove:
collection.rm_pointer(pointer, auth)
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
def create(self, validated_data):
instance = self.context['view'].get_object()
auth = utils.get_user_auth(self.context['request'])
collection = instance['self']
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
if not len(add):
raise RelationshipPostMakesNoChanges
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
class LinkedRegistrationsRelationshipSerializer(BaseAPISerializer):
data = ser.ListField(child=LinkedRegistration())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].linked_registrations_self_url
def get_related_url(self, obj):
return obj['self'].linked_registrations_related_url
class Meta:
type_ = 'linked_registrations'
def get_pointers_to_add_remove(self, pointers, new_pointers):
diff = relationship_diff(
current_items={pointer._id: pointer for pointer in pointers},
new_items={val['_id']: val for val in new_pointers}
)
nodes_to_add = []
for node_id in diff['add']:
node = Node.load(node_id)
if not node:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
nodes_to_add.append(node)
return nodes_to_add, diff['remove'].values()
def make_instance_obj(self, obj):
# Convenience method to format instance based on view's get_object
return {'data': [
pointer for pointer in
obj.linked_nodes.filter(is_deleted=False, type='osf.registration')
], 'self': obj}
def update(self, instance, validated_data):
collection = instance['self']
auth = utils.get_user_auth(self.context['request'])
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
for pointer in remove:
collection.rm_pointer(pointer, auth)
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
def create(self, validated_data):
instance = self.context['view'].get_object()
auth = utils.get_user_auth(self.context['request'])
collection = instance['self']
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
if not len(add):
raise RelationshipPostMakesNoChanges
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
|
apache-2.0
| 3,039,951,152,254,714,000
| 37.725634
| 289
| 0.597076
| false
| 4.176157
| false
| false
| false
|
yakky/djangocms-text-ckeditor
|
djangocms_text_ckeditor/forms.py
|
1
|
3464
|
# -*- coding: utf-8 -*-
from django import forms
from django.core import signing
from django.core.signing import BadSignature
from django.forms.models import ModelForm
from django.template import RequestContext
from django.utils.translation import ugettext
from cms.models import CMSPlugin
from .models import Text
from .utils import _render_cms_plugin, plugin_tags_to_id_list, plugin_to_tag
class ActionTokenValidationForm(forms.Form):
token = forms.CharField(required=True)
def get_id_from_token(self, session_id):
payload = self.cleaned_data['token']
signer = signing.Signer(salt=session_id)
try:
return signer.unsign(payload)
except BadSignature:
return False
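# --- Editor's note: added illustration, not part of the original module. ---
# A minimal sketch of the token round trip validated above, assuming a
# configured Django settings module (SECRET_KEY). The session id is used as
# the signing salt, so a token minted for one session does not validate in
# another. The session id and plugin id below are made up.
def _action_token_example(session_id='abc123', plugin_id='42'):
    token = signing.Signer(salt=session_id).sign(plugin_id)
    form = ActionTokenValidationForm(data={'token': token})
    assert form.is_valid()
    assert form.get_id_from_token(session_id) == plugin_id
    return token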
class RenderPluginForm(forms.Form):
plugin = forms.ModelChoiceField(
queryset=CMSPlugin.objects.none(),
required=True,
)
def __init__(self, *args, **kwargs):
self.text_plugin = kwargs.pop('text_plugin')
super(RenderPluginForm, self).__init__(*args, **kwargs)
self.fields['plugin'].queryset = self.get_child_plugins()
def get_child_plugins(self):
return self.text_plugin.get_descendants()
def render_plugin(self, request):
plugin = self.cleaned_data['plugin']
context = RequestContext(request)
context['request'] = request
rendered_content = _render_cms_plugin(plugin, context)
return plugin_to_tag(plugin, content=rendered_content, admin=True)
class DeleteOnCancelForm(forms.Form):
child_plugins = forms.ModelMultipleChoiceField(
queryset=CMSPlugin.objects.none(),
required=False,
)
def __init__(self, *args, **kwargs):
self.text_plugin = kwargs.pop('text_plugin')
super(DeleteOnCancelForm, self).__init__(*args, **kwargs)
self.fields['child_plugins'].queryset = self.get_child_plugins()
def clean(self):
children = self.cleaned_data.get('child_plugins')
if not children and self.text_plugin.get_plugin_instance()[0]:
# This check prevents users from using a cancel token
# to delete just any text plugin.
# Only non-saved text plugins can be deleted.
message = ugettext("Can't delete a saved plugin.")
raise forms.ValidationError(message, code='invalid')
return self.cleaned_data
def get_child_plugins(self):
# We use this queryset to limit the plugins
# a user can delete to only plugins that have not
# been saved in text and are descendants of the text plugin.
instance = self.text_plugin.get_plugin_instance()[0]
if instance:
# Only non-saved children can be deleted.
excluded_plugins = plugin_tags_to_id_list(instance.body)
else:
excluded_plugins = []
queryset = self.text_plugin.get_descendants()
if excluded_plugins:
queryset = queryset.exclude(pk__in=excluded_plugins)
return queryset
def delete(self):
child_plugins = self.cleaned_data.get('child_plugins')
if child_plugins:
child_plugins.delete()
else:
self.text_plugin.delete()
class TextForm(ModelForm):
body = forms.CharField()
class Meta:
model = Text
exclude = (
'page',
'position',
'placeholder',
'language',
'plugin_type',
)
|
bsd-3-clause
| -7,001,191,991,926,625,000
| 29.928571
| 76
| 0.633661
| false
| 4.214112
| false
| false
| false
|
SanPen/GridCal
|
src/research/PTDF/ACPTDF_research2.py
|
1
|
14022
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
import numba as nb
import time
from warnings import warn
import scipy.sparse as sp
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse import hstack as hs, vstack as vs
from scipy.sparse.linalg import factorized, spsolve, inv
from matplotlib import pyplot as plt
from GridCal.Engine import *
def SysMat(Y, Ys, pq, pvpq):
"""
Computes the system Jacobian matrix in polar coordinates
Args:
        Y: Full admittance matrix
        Ys: Series admittance matrix
pq: Array with the indices of the PQ buses
pvpq: Array with the indices of the PV and PQ buses
Returns:
The system Jacobian matrix
"""
A11 = -Ys.imag[np.ix_(pvpq, pvpq)]
A12 = Y.real[np.ix_(pvpq, pq)]
A21 = -Ys.real[np.ix_(pq, pvpq)]
A22 = -Y.imag[np.ix_(pq, pq)]
Asys = sp.vstack([sp.hstack([A11, A12]),
sp.hstack([A21, A22])], format="csc")
return Asys
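# --- Editor's note: added clarification, not part of the original module. ---
# The reduced system matrix assembled in SysMat() has the block structure
#
#     Asys = [ -Im(Ys)[pvpq, pvpq]   Re(Y)[pvpq, pq] ]
#            [ -Re(Ys)[pq,   pvpq]  -Im(Y)[pq,   pq] ]
#
# so that solving Asys * dx = dS in compute_acptdf() yields the voltage-angle
# increments for the pv+pq buses followed by the voltage-magnitude increments
# for the pq buses.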
def compute_acptdf(Ybus, Yseries, Yf, Yt, Cf, V, pq, pv, distribute_slack):
"""
Compute the AC-PTDF
    :param Ybus: admittance matrix
    :param Yseries: series admittance matrix
    :param Yf: Admittance matrix of the buses "from"
    :param Yt: Admittance matrix of the buses "to"
    :param Cf: Connectivity branch - bus "from"
    :param V: voltages array
    :param pq: array of pq node indices
    :param pv: array of pv node indices
    :param distribute_slack: distribute the slack among all the nodes?
    :return: AC-PTDF matrix (branches, buses)
"""
n = len(V)
pvpq = np.r_[pv, pq]
npq = len(pq)
# compute the Jacobian
J = SysMat(Ybus, Yseries, pq, pvpq)
if distribute_slack:
dP = np.ones((n, n)) * (-1 / (n - 1))
for i in range(n):
dP[i, i] = 1.0
else:
dP = np.eye(n, n)
    # compose the compatible array (the Q increments are considered zero)
dQ = np.zeros((npq, n))
# dQ = np.eye(n, n)[pq, :]
dS = np.r_[dP[pvpq, :], dQ]
# solve the voltage increments
dx = spsolve(J, dS)
# compute branch derivatives
If = Yf * V
E = V / np.abs(V)
Vdiag = sp.diags(V)
Vdiag_conj = sp.diags(np.conj(V))
Ediag = sp.diags(E)
Ediag_conj = sp.diags(np.conj(E))
If_diag_conj = sp.diags(np.conj(If))
Yf_conj = Yf.copy()
Yf_conj.data = np.conj(Yf_conj.data)
Yt_conj = Yt.copy()
Yt_conj.data = np.conj(Yt_conj.data)
dSf_dVa = 1j * (If_diag_conj * Cf * Vdiag - sp.diags(Cf * V) * Yf_conj * Vdiag_conj)
dSf_dVm = If_diag_conj * Cf * Ediag - sp.diags(Cf * V) * Yf_conj * Ediag_conj
# compose the final AC-PTDF
dPf_dVa = dSf_dVa.real[:, pvpq]
dPf_dVm = dSf_dVm.real[:, pq]
PTDF = sp.hstack((dPf_dVa, dPf_dVm)) * dx
return PTDF
def make_lodf(circuit: SnapshotCircuit, PTDF, correct_values=True):
"""
:param circuit:
:param PTDF: PTDF matrix in numpy array form
:return:
"""
nl = circuit.nbr
# compute the connectivity matrix
Cft = circuit.C_branch_bus_f - circuit.C_branch_bus_t
H = PTDF * Cft.T
# old code
# h = sp.diags(H.diagonal())
# LODF = H / (np.ones((nl, nl)) - h * np.ones(nl))
# divide each row of H by the vector 1 - H.diagonal
# LODF = H / (1 - H.diagonal())
# replace possible nan and inf
# LODF[LODF == -np.inf] = 0
# LODF[LODF == np.inf] = 0
# LODF = np.nan_to_num(LODF)
# this loop avoids the divisions by zero
# in those cases the LODF column should be zero
LODF = np.zeros((nl, nl))
div = 1 - H.diagonal()
for j in range(H.shape[1]):
if div[j] != 0:
LODF[:, j] = H[:, j] / div[j]
# replace the diagonal elements by -1
# old code
# LODF = LODF - sp.diags(LODF.diagonal()) - sp.eye(nl, nl), replaced by:
for i in range(nl):
LODF[i, i] = - 1.0
if correct_values:
i1, j1 = np.where(LODF > 1)
for i, j in zip(i1, j1):
LODF[i, j] = 1
i2, j2 = np.where(LODF < -1)
for i, j in zip(i2, j2):
LODF[i, j] = -1
return LODF
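# --- Editor's note: added illustration, not part of the original module. ---
# A minimal numeric sketch of how the LODF matrix built above is used: the
# post-contingency flow on a monitored branch m, when branch c is outaged, is
# flows[m] + LODF[m, c] * flows[c]. The 2x2 values below are made up purely
# for illustration.
def _lodf_usage_example():
    LODF = np.array([[-1.0, 0.4],
                     [0.6, -1.0]])
    flows = np.array([100.0, 50.0])       # pre-contingency branch flows (MW)
    c = 1                                 # index of the failed branch
    post = flows + LODF[:, c] * flows[c]  # flows after the outage of branch c
    # the failed branch itself goes to zero because LODF[c, c] == -1
    assert abs(post[c]) < 1e-9
    return post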
def get_branch_time_series(circuit: TimeCircuit, PTDF):
"""
    :param circuit: TimeCircuit instance
    :param PTDF: PTDF matrix (branches, buses)
    :return: branch active power time series
"""
# option 2: call the power directly
P = circuit.Sbus.real
Pbr = np.dot(PTDF, P).T * circuit.Sbase
return Pbr
def multiple_failure_old(flows, LODF, beta, delta, alpha):
"""
:param flows: array of all the pre-contingency flows
:param LODF: Line Outage Distribution Factors Matrix
:param beta: index of the first failed line
:param delta: index of the second failed line
:param alpha: index of the line where you want to see the effects
:return: post contingency flow in the line alpha
"""
# multiple contingency matrix
M = np.ones((2, 2))
M[0, 1] = -LODF[beta, delta]
M[1, 0] = -LODF[delta, beta]
# normal flows of the lines beta and delta
F = flows[[beta, delta]]
# contingency flows after failing the ines beta and delta
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines beta and delta
L = LODF[alpha, :][[beta, delta]]
dFf_alpha = np.dot(L, Ff)
return F[alpha] + dFf_alpha
def multiple_failure(flows, LODF, failed_idx):
"""
From the paper:
Multiple Element Contingency Screening
IEEE TRANSACTIONS ON POWER SYSTEMS, VOL. 26, NO. 3, AUGUST 2011
C. Matthew Davis and Thomas J. Overbye
:param flows: array of all the pre-contingency flows (the base flows)
:param LODF: Line Outage Distribution Factors Matrix
:param failed_idx: indices of the failed lines
:return: all post contingency flows
"""
# multiple contingency matrix
M = -LODF[np.ix_(failed_idx, failed_idx)]
for i in range(len(failed_idx)):
M[i, i] = 1.0
# normal flows of the failed lines indicated by failed_idx
F = flows[failed_idx]
# Affected flows after failing the lines indicated by failed_idx
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines indicated by failed_idx
L = LODF[:, failed_idx]
dFf_alpha = np.dot(L, Ff)
# return the final contingency flow as the base flow plus the contingency flow delta
return flows + dFf_alpha
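# --- Editor's note: added illustration, not part of the original module. ---
# A tiny sketch of the multiple-outage screening implemented above for two
# failed lines: M collects the mutual LODF terms, Ff = solve(M, F) gives the
# equivalent flows of the failed lines, and LODF[:, failed_idx] @ Ff is the
# change seen on every branch. The LODF matrix and flows are made up; the
# check only relies on the property that outaged branches end at zero flow.
def _multiple_failure_example():
    LODF = np.array([[-1.0, 0.2, 0.3],
                     [0.1, -1.0, 0.4],
                     [0.5, 0.2, -1.0]])
    flows = np.array([80.0, 60.0, 40.0])
    post = multiple_failure(flows, LODF, failed_idx=[0, 1])
    assert np.allclose(post[[0, 1]], 0.0)  # failed lines carry no flow
    return post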
def get_n_minus_1_flows(circuit: MultiCircuit):
opt = PowerFlowOptions()
branches = circuit.get_branches()
m = circuit.get_branch_number()
Pmat = np.zeros((m, m)) # monitored, contingency
for c, branch in enumerate(branches):
if branch.active:
branch.active = False
pf = PowerFlowDriver(circuit, opt)
pf.run()
Pmat[:, c] = pf.results.Sbranch.real
branch.active = True
return Pmat
def check_lodf(grid: MultiCircuit):
flows_n1_nr = get_n_minus_1_flows(grid)
# assume 1 island
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0]
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=True)
LODF = make_lodf(circuit, PTDF)
Pbus = circuit.get_injections(False).real
flows_n = np.dot(PTDF, Pbus)
nl = circuit.nbr
flows_n1 = np.zeros((nl, nl))
for c in range(nl): # branch that fails (contingency)
# for m in range(nl): # branch to monitor
# flows_n1[m, c] = flows_n[m] + LODF[m, c] * flows_n[c]
flows_n1[:, c] = flows_n[:] + LODF[:, c] * flows_n[c]
return flows_n, flows_n1_nr, flows_n1
def test_ptdf(grid):
"""
Sigma-distances test
:param grid:
:return:
"""
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0] # pick the first island
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=False)
print('PTDF:')
print(PTDF)
if __name__ == '__main__':
from GridCal.Engine import FileOpen
    import pandas as pd
    import os   # needed for os.path.splitext below
    import sys  # needed for sys.maxsize below
np.set_printoptions(threshold=sys.maxsize, linewidth=200000000)
# np.set_printoptions(linewidth=2000, suppress=True)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/lynn5buspv.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 118.xlsx'
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
# fname = 'helm_data1.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14 PQ only.gridcal'
# fname = 'IEEE 14 PQ only full.gridcal'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case5.m'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case30.m'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/PGOC_6bus.gridcal'
grid_ = FileOpen(fname).open()
test_ptdf(grid_)
name = os.path.splitext(fname.split(os.sep)[-1])[0]
method = 'ACPTDF (No Jacobian, V=Vpf)'
nc_ = compile_snapshot_circuit(grid_)
islands_ = split_into_islands(nc_)
circuit_ = islands_[0]
pf_driver_ = PowerFlowDriver(grid_, PowerFlowOptions())
pf_driver_.run()
H_ = compute_acptdf(Ybus=circuit_.Ybus,
Yseries=circuit_.Yseries,
Yf=circuit_.Yf,
Yt=circuit_.Yt,
Cf=circuit_.C_branch_bus_f,
V=pf_driver_.results.voltage,
pq=circuit_.pq,
pv=circuit_.pv,
distribute_slack=False)
LODF_ = make_lodf(circuit_, H_)
if H_.shape[0] < 50:
print('PTDF:\n', H_)
print('LODF:\n', LODF_)
flows_n_, flows_n1_nr_, flows_n1_ = check_lodf(grid_)
# in the case of the grid PGOC_6bus
flows_multiple = multiple_failure(flows=flows_n_,
LODF=LODF_,
failed_idx=[1, 5]) # failed lines 2 and 6
Pn1_nr_df = pd.DataFrame(data=flows_n1_nr_, index=nc_.branch_names, columns=nc_.branch_names)
flows_n1_df = pd.DataFrame(data=flows_n1_, index=nc_.branch_names, columns=nc_.branch_names)
# plot N-1
fig = plt.figure(figsize=(12, 8))
title = 'N-1 with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
Pn1_nr_df.plot(ax=ax1, legend=False)
flows_n1_df.plot(ax=ax2, legend=False)
diff = Pn1_nr_df - flows_n1_df
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson N-1 flows')
ax2.set_title('PTDF N-1 flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
# ------------------------------------------------------------------------------------------------------------------
# Perform real time series
# ------------------------------------------------------------------------------------------------------------------
if grid_.time_profile is not None:
grid_.ensure_profiles_exist()
nc_ts = compile_time_circuit(grid_)
islands_ts = split_time_circuit_into_islands(nc_ts)
circuit_ts = islands_ts[0]
pf_options = PowerFlowOptions()
ts_driver = TimeSeries(grid=grid_, options=pf_options)
ts_driver.run()
Pbr_nr = ts_driver.results.Sbranch.real
df_Pbr_nr = pd.DataFrame(data=Pbr_nr, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# Compute the PTDF based flows
Pbr_ptdf = get_branch_time_series(circuit=circuit_ts, PTDF=H_)
df_Pbr_ptdf = pd.DataFrame(data=Pbr_ptdf, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# plot
fig = plt.figure(figsize=(12, 8))
title = 'Flows with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
df_Pbr_nr.plot(ax=ax1, legend=False)
df_Pbr_ptdf.plot(ax=ax2, legend=False)
diff = df_Pbr_nr - df_Pbr_ptdf
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson flows')
ax2.set_title('PTDF flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
plt.show()
|
gpl-3.0
| 2,332,068,375,098,212,400
| 31.234483
| 120
| 0.590857
| false
| 3.160243
| false
| false
| false
|
Yubico/yubikey-manager
|
ykman/cli/fido.py
|
1
|
24461
|
# Copyright (c) 2018 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from fido2.ctap import CtapError
from fido2.ctap1 import ApduError
from fido2.ctap2 import (
Ctap2,
ClientPin,
CredentialManagement,
FPBioEnrollment,
CaptureError,
)
from fido2.pcsc import CtapPcscDevice
from yubikit.core.fido import FidoConnection
from yubikit.core.smartcard import SW
from time import sleep
from .util import (
click_postpone_execution,
click_prompt,
click_force_option,
ykman_group,
prompt_timeout,
)
from .util import cli_fail
from ..fido import is_in_fips_mode, fips_reset, fips_change_pin, fips_verify_pin
from ..hid import list_ctap_devices
from ..device import is_fips_version
from ..pcsc import list_devices as list_ccid
from smartcard.Exceptions import NoCardException, CardConnectionException
from typing import Optional
import click
import logging
logger = logging.getLogger(__name__)
FIPS_PIN_MIN_LENGTH = 6
PIN_MIN_LENGTH = 4
@ykman_group(FidoConnection)
@click.pass_context
@click_postpone_execution
def fido(ctx):
"""
Manage the FIDO applications.
Examples:
\b
Reset the FIDO (FIDO2 and U2F) applications:
$ ykman fido reset
\b
Change the FIDO2 PIN from 123456 to 654321:
$ ykman fido access change-pin --pin 123456 --new-pin 654321
"""
conn = ctx.obj["conn"]
try:
ctx.obj["ctap2"] = Ctap2(conn)
except (ValueError, CtapError) as e:
logger.info("FIDO device does not support CTAP2: %s", e)
@fido.command()
@click.pass_context
def info(ctx):
"""
Display general status of the FIDO2 application.
"""
conn = ctx.obj["conn"]
ctap2 = ctx.obj.get("ctap2")
if is_fips_version(ctx.obj["info"].version):
click.echo("FIPS Approved Mode: " + ("Yes" if is_in_fips_mode(conn) else "No"))
elif ctap2:
client_pin = ClientPin(ctap2) # N.B. All YubiKeys with CTAP2 support PIN.
if ctap2.info.options["clientPin"]:
if ctap2.info.force_pin_change:
click.echo(
"NOTE: The FIDO PID is disabled and must be changed before it can "
"be used!"
)
pin_retries, power_cycle = client_pin.get_pin_retries()
if pin_retries:
click.echo(f"PIN is set, with {pin_retries} attempt(s) remaining.")
if power_cycle:
click.echo(
"PIN is temporarily blocked. "
"Remove and re-insert the YubiKey to unblock."
)
else:
click.echo("PIN is set, but has been blocked.")
else:
click.echo("PIN is not set.")
bio_enroll = ctap2.info.options.get("bioEnroll")
if bio_enroll:
uv_retries, _ = client_pin.get_uv_retries()
if uv_retries:
click.echo(
f"Fingerprints registered, with {uv_retries} attempt(s) "
"remaining."
)
else:
click.echo(
"Fingerprints registered, but blocked until PIN is verified."
)
elif bio_enroll is False:
click.echo("No fingerprints have been registered.")
always_uv = ctap2.info.options.get("alwaysUv")
if always_uv is not None:
click.echo(
"Always Require User Verification is turned "
+ ("on." if always_uv else "off.")
)
else:
click.echo("PIN is not supported.")
@fido.command("reset")
@click_force_option
@click.pass_context
def reset(ctx, force):
"""
Reset all FIDO applications.
This action will wipe all FIDO credentials, including FIDO U2F credentials,
on the YubiKey and remove the PIN code.
The reset must be triggered immediately after the YubiKey is
inserted, and requires a touch on the YubiKey.
"""
conn = ctx.obj["conn"]
if isinstance(conn, CtapPcscDevice): # NFC
readers = list_ccid(conn._name)
if not readers or readers[0].reader.name != conn._name:
logger.error(f"Multiple readers matched: {readers}")
cli_fail("Unable to isolate NFC reader.")
dev = readers[0]
logger.debug(f"use: {dev}")
is_fips = False
def prompt_re_insert():
click.echo(
"Remove and re-place your YubiKey on the NFC reader to perform the "
"reset..."
)
removed = False
while True:
sleep(0.5)
try:
with dev.open_connection(FidoConnection):
if removed:
sleep(1.0) # Wait for the device to settle
break
except CardConnectionException:
pass # Expected, ignore
except NoCardException:
removed = True
return dev.open_connection(FidoConnection)
else: # USB
n_keys = len(list_ctap_devices())
if n_keys > 1:
cli_fail("Only one YubiKey can be connected to perform a reset.")
is_fips = is_fips_version(ctx.obj["info"].version)
ctap2 = ctx.obj.get("ctap2")
if not is_fips and not ctap2:
cli_fail("This YubiKey does not support FIDO reset.")
def prompt_re_insert():
click.echo("Remove and re-insert your YubiKey to perform the reset...")
removed = False
while True:
sleep(0.5)
keys = list_ctap_devices()
if not keys:
removed = True
if removed and len(keys) == 1:
return keys[0].open_connection(FidoConnection)
if not force:
if not click.confirm(
"WARNING! This will delete all FIDO credentials, including FIDO U2F "
"credentials, and restore factory settings. Proceed?",
err=True,
):
ctx.abort()
if is_fips:
destroy_input = click_prompt(
"WARNING! This is a YubiKey FIPS device. This command will also "
"overwrite the U2F attestation key; this action cannot be undone and "
"this YubiKey will no longer be a FIPS compliant device.\n"
'To proceed, please enter the text "OVERWRITE"',
default="",
show_default=False,
)
if destroy_input != "OVERWRITE":
cli_fail("Reset aborted by user.")
conn = prompt_re_insert()
try:
with prompt_timeout():
if is_fips:
fips_reset(conn)
else:
Ctap2(conn).reset()
except CtapError as e:
logger.error("Reset failed", exc_info=e)
if e.code == CtapError.ERR.ACTION_TIMEOUT:
cli_fail(
"Reset failed. You need to touch your YubiKey to confirm the reset."
)
elif e.code in (CtapError.ERR.NOT_ALLOWED, CtapError.ERR.PIN_AUTH_BLOCKED):
cli_fail(
"Reset failed. Reset must be triggered within 5 seconds after the "
"YubiKey is inserted."
)
else:
cli_fail(f"Reset failed: {e.code.name}")
except ApduError as e: # From fips_reset
logger.error("Reset failed", exc_info=e)
if e.code == SW.COMMAND_NOT_ALLOWED:
cli_fail(
"Reset failed. Reset must be triggered within 5 seconds after the "
"YubiKey is inserted."
)
else:
cli_fail("Reset failed.")
except Exception as e:
logger.error(e)
cli_fail("Reset failed.")
def _fail_pin_error(ctx, e, other="%s"):
if e.code == CtapError.ERR.PIN_INVALID:
cli_fail("Wrong PIN.")
elif e.code == CtapError.ERR.PIN_AUTH_BLOCKED:
cli_fail(
"PIN authentication is currently blocked. "
"Remove and re-insert the YubiKey."
)
elif e.code == CtapError.ERR.PIN_BLOCKED:
cli_fail("PIN is blocked.")
else:
cli_fail(other % e.code)
@fido.group("access")
def access():
"""
Manage the PIN for FIDO.
"""
@access.command("change-pin")
@click.pass_context
@click.option("-P", "--pin", help="Current PIN code.")
@click.option("-n", "--new-pin", help="A new PIN.")
@click.option(
"-u", "--u2f", is_flag=True, help="Set FIDO U2F PIN instead of FIDO2 PIN."
)
def change_pin(ctx, pin, new_pin, u2f):
"""
Set or change the PIN code.
The FIDO2 PIN must be at least 4 characters long, and supports any type
of alphanumeric characters.
On YubiKey FIPS, a PIN can be set for FIDO U2F. That PIN must be at least
6 characters long.
"""
is_fips = is_fips_version(ctx.obj["info"].version)
if is_fips and not u2f:
cli_fail("This is a YubiKey FIPS. To set the U2F PIN, pass the --u2f option.")
if u2f and not is_fips:
cli_fail(
"This is not a YubiKey FIPS, and therefore does not support a U2F PIN. "
"To set the FIDO2 PIN, remove the --u2f option."
)
if is_fips:
conn = ctx.obj["conn"]
else:
ctap2 = ctx.obj.get("ctap2")
if not ctap2:
cli_fail("PIN is not supported on this YubiKey.")
client_pin = ClientPin(ctap2)
def prompt_new_pin():
return click_prompt(
"Enter your new PIN",
default="",
hide_input=True,
show_default=False,
confirmation_prompt=True,
)
def change_pin(pin, new_pin):
if pin is not None:
_fail_if_not_valid_pin(ctx, pin, is_fips)
try:
if is_fips:
try:
# Failing this with empty current PIN does not cost a retry
fips_change_pin(conn, pin or "", new_pin)
except ApduError as e:
if e.code == SW.WRONG_LENGTH:
pin = _prompt_current_pin()
_fail_if_not_valid_pin(ctx, pin, is_fips)
fips_change_pin(conn, pin, new_pin)
else:
raise
else:
client_pin.change_pin(pin, new_pin)
except CtapError as e:
logger.error("Failed to change PIN", exc_info=e)
if e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
cli_fail("New PIN doesn't meet policy requirements.")
else:
_fail_pin_error(ctx, e, "Failed to change PIN: %s")
except ApduError as e:
logger.error("Failed to change PIN", exc_info=e)
if e.code == SW.VERIFY_FAIL_NO_RETRY:
cli_fail("Wrong PIN.")
elif e.code == SW.AUTH_METHOD_BLOCKED:
cli_fail("PIN is blocked.")
else:
cli_fail(f"Failed to change PIN: SW={e.code:04x}")
def set_pin(new_pin):
_fail_if_not_valid_pin(ctx, new_pin, is_fips)
try:
client_pin.set_pin(new_pin)
except CtapError as e:
logger.error("Failed to set PIN", exc_info=e)
if e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
cli_fail("PIN is too long.")
else:
cli_fail(f"Failed to set PIN: {e.code}")
if not is_fips:
if ctap2.info.options.get("clientPin"):
if not pin:
pin = _prompt_current_pin()
else:
if pin:
cli_fail("There is no current PIN set. Use --new-pin to set one.")
if not new_pin:
new_pin = prompt_new_pin()
if is_fips:
_fail_if_not_valid_pin(ctx, new_pin, is_fips)
change_pin(pin, new_pin)
else:
if len(new_pin) < ctap2.info.min_pin_length:
cli_fail("New PIN is too short.")
if ctap2.info.options.get("clientPin"):
change_pin(pin, new_pin)
else:
set_pin(new_pin)
def _require_pin(ctx, pin, feature="This feature"):
ctap2 = ctx.obj.get("ctap2")
if not ctap2:
cli_fail(f"{feature} is not supported on this YubiKey.")
if not ctap2.info.options.get("clientPin"):
cli_fail(f"{feature} requires having a PIN. Set a PIN first.")
if ctap2.info.force_pin_change:
cli_fail("The FIDO PIN is blocked. Change the PIN first.")
if pin is None:
pin = _prompt_current_pin(prompt="Enter your PIN")
return pin
@access.command("verify-pin")
@click.pass_context
@click.option("-P", "--pin", help="Current PIN code.")
def verify(ctx, pin):
"""
Verify the FIDO PIN against a YubiKey.
For YubiKeys supporting FIDO2 this will reset the "retries" counter of the PIN.
For YubiKey FIPS this will unlock the session, allowing U2F registration.
"""
ctap2 = ctx.obj.get("ctap2")
if ctap2:
pin = _require_pin(ctx, pin)
client_pin = ClientPin(ctap2)
try:
# Get a PIN token to verify the PIN.
client_pin.get_pin_token(
pin, ClientPin.PERMISSION.GET_ASSERTION, "ykman.example.com"
)
except CtapError as e:
logger.error("PIN verification failed", exc_info=e)
cli_fail(f"Error: {e}")
elif is_fips_version(ctx.obj["info"].version):
_fail_if_not_valid_pin(ctx, pin, True)
try:
fips_verify_pin(ctx.obj["conn"], pin)
except ApduError as e:
logger.error("PIN verification failed", exc_info=e)
if e.code == SW.VERIFY_FAIL_NO_RETRY:
cli_fail("Wrong PIN.")
elif e.code == SW.AUTH_METHOD_BLOCKED:
cli_fail("PIN is blocked.")
elif e.code == SW.COMMAND_NOT_ALLOWED:
cli_fail("PIN is not set.")
else:
cli_fail(f"PIN verification failed: {e.code.name}")
else:
cli_fail("This YubiKey does not support a FIDO PIN.")
click.echo("PIN verified.")
def _prompt_current_pin(prompt="Enter your current PIN"):
return click_prompt(prompt, default="", hide_input=True, show_default=False)
def _fail_if_not_valid_pin(ctx, pin=None, is_fips=False):
min_length = FIPS_PIN_MIN_LENGTH if is_fips else PIN_MIN_LENGTH
if not pin or len(pin) < min_length:
ctx.fail(f"PIN must be over {min_length} characters long")
def _gen_creds(credman):
data = credman.get_metadata()
if data.get(CredentialManagement.RESULT.EXISTING_CRED_COUNT) == 0:
return # No credentials
for rp in credman.enumerate_rps():
for cred in credman.enumerate_creds(rp[CredentialManagement.RESULT.RP_ID_HASH]):
yield (
rp[CredentialManagement.RESULT.RP]["id"],
cred[CredentialManagement.RESULT.CREDENTIAL_ID],
cred[CredentialManagement.RESULT.USER]["id"],
cred[CredentialManagement.RESULT.USER]["name"],
)
def _format_cred(rp_id, user_id, user_name):
return f"{rp_id} {user_id.hex()} {user_name}"
@fido.group("credentials")
def creds():
"""
Manage discoverable (resident) credentials.
This command lets you manage credentials stored on your YubiKey.
Credential management is only available when a FIDO PIN is set on the YubiKey.
\b
Examples:
\b
List credentials (providing PIN via argument):
$ ykman fido credentials list --pin 123456
\b
Delete a credential by user name (PIN will be prompted for):
$ ykman fido credentials delete example_user
"""
def _init_credman(ctx, pin):
pin = _require_pin(ctx, pin, "Credential Management")
ctap2 = ctx.obj.get("ctap2")
client_pin = ClientPin(ctap2)
try:
token = client_pin.get_pin_token(pin, ClientPin.PERMISSION.CREDENTIAL_MGMT)
except CtapError as e:
logger.error("Ctap error", exc_info=e)
_fail_pin_error(ctx, e, "PIN error: %s")
return CredentialManagement(ctap2, client_pin.protocol, token)
@creds.command("list")
@click.pass_context
@click.option("-P", "--pin", help="PIN code.")
def creds_list(ctx, pin):
"""
List credentials.
"""
creds = _init_credman(ctx, pin)
for (rp_id, _, user_id, user_name) in _gen_creds(creds):
click.echo(_format_cred(rp_id, user_id, user_name))
@creds.command("delete")
@click.pass_context
@click.argument("query")
@click.option("-P", "--pin", help="PIN code.")
@click.option("-f", "--force", is_flag=True, help="Confirm deletion without prompting")
def creds_delete(ctx, query, pin, force):
"""
Delete a credential.
\b
QUERY A unique substring match of a credentials RP ID, user ID (hex) or name,
or credential ID.
"""
credman = _init_credman(ctx, pin)
hits = [
(rp_id, cred_id, user_id, user_name)
for (rp_id, cred_id, user_id, user_name) in _gen_creds(credman)
if query.lower() in user_name.lower()
or query.lower() in rp_id.lower()
or user_id.hex().startswith(query.lower())
or query.lower() in _format_cred(rp_id, user_id, user_name)
]
if len(hits) == 0:
cli_fail("No matches, nothing to be done.")
elif len(hits) == 1:
(rp_id, cred_id, user_id, user_name) = hits[0]
if force or click.confirm(
f"Delete credential {_format_cred(rp_id, user_id, user_name)}?"
):
try:
credman.delete_cred(cred_id)
except CtapError as e:
logger.error("Failed to delete resident credential", exc_info=e)
cli_fail("Failed to delete resident credential.")
else:
cli_fail("Multiple matches, make the query more specific.")
@fido.group("fingerprints")
def bio():
"""
Manage fingerprints.
Requires a YubiKey with fingerprint sensor.
Fingerprint management is only available when a FIDO PIN is set on the YubiKey.
\b
Examples:
\b
Register a new fingerprint (providing PIN via argument):
$ ykman fido fingerprints add "Left thumb" --pin 123456
\b
List already stored fingerprints (providing PIN via argument):
$ ykman fido fingerprints list --pin 123456
\b
Delete a stored fingerprint with ID "f691" (PIN will be prompted for):
$ ykman fido fingerprints delete f691
"""
def _init_bio(ctx, pin):
ctap2 = ctx.obj.get("ctap2")
if not ctap2 or "bioEnroll" not in ctap2.info.options:
cli_fail("Biometrics is not supported on this YubiKey.")
pin = _require_pin(ctx, pin, "Biometrics")
client_pin = ClientPin(ctap2)
try:
token = client_pin.get_pin_token(pin, ClientPin.PERMISSION.BIO_ENROLL)
except CtapError as e:
logger.error("Ctap error", exc_info=e)
_fail_pin_error(ctx, e, "PIN error: %s")
return FPBioEnrollment(ctap2, client_pin.protocol, token)
def _format_fp(template_id, name):
return f"{template_id.hex()}{f' ({name})' if name else ''}"
@bio.command("list")
@click.pass_context
@click.option("-P", "--pin", help="PIN code.")
def bio_list(ctx, pin):
"""
    List registered fingerprints.
Lists fingerprints by ID and (if available) label.
"""
bio = _init_bio(ctx, pin)
for t_id, name in bio.enumerate_enrollments().items():
click.echo(f"ID: {_format_fp(t_id, name)}")
@bio.command("add")
@click.pass_context
@click.argument("name")
@click.option("-P", "--pin", help="PIN code.")
def bio_enroll(ctx, name, pin):
"""
Add a new fingerprint.
\b
NAME A short readable name for the fingerprint (eg. "Left thumb").
"""
if len(name.encode()) > 15:
ctx.fail("Fingerprint name must be a maximum of 15 characters")
bio = _init_bio(ctx, pin)
enroller = bio.enroll()
template_id = None
while template_id is None:
click.echo("Place your finger against the sensor now...")
try:
template_id = enroller.capture()
remaining = enroller.remaining
if remaining:
click.echo(f"{remaining} more scans needed.")
except CaptureError as e:
logger.error(f"Capture error: {e.code}")
click.echo("Capture failed. Re-center your finger, and try again.")
except CtapError as e:
logger.error("Failed to add fingerprint template", exc_info=e)
if e.code == CtapError.ERR.FP_DATABASE_FULL:
cli_fail(
"Fingerprint storage full. "
"Remove some fingerprints before adding new ones."
)
elif e.code == CtapError.ERR.USER_ACTION_TIMEOUT:
cli_fail("Failed to add fingerprint due to user inactivity.")
cli_fail(f"Failed to add fingerprint: {e.code.name}")
click.echo("Capture complete.")
bio.set_name(template_id, name)
@bio.command("rename")
@click.pass_context
@click.argument("template_id", metavar="ID")
@click.argument("name")
@click.option("-P", "--pin", help="PIN code.")
def bio_rename(ctx, template_id, name, pin):
"""
Set the label for a fingerprint.
\b
ID The ID of the fingerprint to rename (as shown in "list").
NAME A short readable name for the fingerprint (eg. "Left thumb").
"""
if len(name) >= 16:
ctx.fail("Fingerprint name must be a maximum of 15 characters")
bio = _init_bio(ctx, pin)
enrollments = bio.enumerate_enrollments()
key = bytes.fromhex(template_id)
if key not in enrollments:
cli_fail(f"No fingerprint matching ID={template_id}.")
bio.set_name(key, name)
@bio.command("delete")
@click.pass_context
@click.argument("template_id", metavar="ID")
@click.option("-P", "--pin", help="PIN code.")
@click.option("-f", "--force", is_flag=True, help="Confirm deletion without prompting")
def bio_delete(ctx, template_id, pin, force):
"""
Delete a fingerprint.
Delete a fingerprint from the YubiKey by its ID, which can be seen by running the
"list" subcommand.
"""
bio = _init_bio(ctx, pin)
enrollments = bio.enumerate_enrollments()
try:
key: Optional[bytes] = bytes.fromhex(template_id)
except ValueError:
key = None
if key not in enrollments:
# Match using template_id as NAME
matches = [k for k in enrollments if enrollments[k] == template_id]
if len(matches) == 0:
cli_fail(f"No fingerprint matching ID={template_id}")
elif len(matches) > 1:
cli_fail(
f"Multiple matches for NAME={template_id}. "
"Delete by template ID instead."
)
key = matches[0]
name = enrollments[key]
if force or click.confirm(f"Delete fingerprint {_format_fp(key, name)}?"):
try:
bio.remove_enrollment(key)
except CtapError as e:
logger.error("Failed to delete fingerprint template", exc_info=e)
cli_fail(f"Failed to delete fingerprint: {e.code.name}")
|
bsd-2-clause
| -6,043,912,827,052,858,000
| 32.010796
| 88
| 0.589346
| false
| 3.633002
| false
| false
| false
|
kobotoolbox/kobocat
|
onadata/apps/logger/tests/test_briefcase_client.py
|
1
|
6934
|
# coding: utf-8
import os.path
from io import StringIO, BytesIO
from urllib.parse import urljoin
import requests
from django.contrib.auth import authenticate
from django.core.files.storage import get_storage_class
from django.core.files.uploadedfile import UploadedFile
from django.urls import reverse
from django.test import RequestFactory
from django_digest.test import Client as DigestClient
from httmock import urlmatch, HTTMock
from onadata.apps.logger.models import Instance, XForm
from onadata.apps.logger.views import formList, download_xform, xformsManifest
from onadata.apps.main.models import MetaData
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.main.views import profile, download_media_data
from onadata.libs.utils.briefcase_client import BriefcaseClient
from onadata.libs.utils.storage import delete_user_storage
storage = get_storage_class()()
@urlmatch(netloc=r'(.*\.)?testserver$')
def form_list_xml(url, request, **kwargs):
response = requests.Response()
factory = RequestFactory()
req = factory.get(url.path)
req.user = authenticate(username='bob', password='bob')
req.user.profile.require_auth = False
req.user.profile.save()
id_string = 'transportation_2011_07_25'
if url.path.endswith('formList'):
res = formList(req, username='bob')
elif url.path.endswith('form.xml'):
res = download_xform(req, username='bob', id_string=id_string)
elif url.path.find('xformsManifest') > -1:
res = xformsManifest(req, username='bob', id_string=id_string)
elif url.path.find('formid-media') > -1:
data_id = url.path[url.path.rfind('/') + 1:]
res = download_media_data(
req, username='bob', id_string=id_string, data_id=data_id)
response._content = get_streaming_content(res)
else:
res = formList(req, username='bob')
response.status_code = 200
if not response._content:
response._content = res.content
return response
def get_streaming_content(res):
tmp = BytesIO()
for chunk in res.streaming_content:
tmp.write(chunk)
content = tmp.getvalue()
tmp.close()
return content
@urlmatch(netloc=r'(.*\.)?testserver$')
def instances_xml(url, request, **kwargs):
response = requests.Response()
client = DigestClient()
client.set_authorization('bob', 'bob', 'Digest')
res = client.get('%s?%s' % (url.path, url.query))
if res.status_code == 302:
res = client.get(res['Location'])
response.encoding = res.get('content-type')
response._content = get_streaming_content(res)
else:
response._content = res.content
response.status_code = 200
return response
class TestBriefcaseClient(TestBase):
def setUp(self):
TestBase.setUp(self)
self._publish_transportation_form()
self._submit_transport_instance_w_attachment()
src = os.path.join(self.this_directory, "fixtures",
"transportation", "screenshot.png")
uf = UploadedFile(file=open(src, 'rb'), content_type='image/png')
count = MetaData.objects.count()
MetaData.media_upload(self.xform, uf)
self.assertEqual(MetaData.objects.count(), count + 1)
url = urljoin(
self.base_url,
reverse(profile, kwargs={'username': self.user.username})
)
self._logout()
self._create_user_and_login('deno', 'deno')
self.bc = BriefcaseClient(
username='bob', password='bob',
url=url,
user=self.user
)
def test_download_xform_xml(self):
"""
Download xform via briefcase api
"""
with HTTMock(form_list_xml):
self.bc.download_xforms()
is_local = storage.__class__.__name__ == 'FileSystemStorage'
forms_folder_path = os.path.join('deno',
'briefcase',
'forms',
self.xform.id_string)
forms_path = os.path.join(forms_folder_path,
'%s.xml' % self.xform.id_string)
form_media_path = os.path.join(forms_folder_path, 'form-media')
media_path = os.path.join(form_media_path, 'screenshot.png')
if is_local:
does_root_folder_exist = storage.exists(forms_folder_path)
does_media_folder_exist = storage.exists(form_media_path)
else:
# `django-storage.exists()` does not work with folders on AWS
sub_folders, files = storage.listdir(forms_folder_path)
does_root_folder_exist = bool(sub_folders or files)
does_media_folder_exist = 'form-media' in sub_folders
self.assertTrue(does_root_folder_exist)
self.assertTrue(storage.exists(forms_path))
self.assertTrue(does_media_folder_exist)
self.assertTrue(storage.exists(media_path))
"""
Download instance xml
"""
with HTTMock(instances_xml):
self.bc.download_instances(self.xform.id_string)
instance_folder_path = os.path.join(forms_folder_path, 'instances')
if is_local:
does_instances_folder_exist = storage.exists(instance_folder_path)
else:
sub_folders, _ = storage.listdir(forms_folder_path)
does_instances_folder_exist = 'instances' in sub_folders
self.assertTrue(does_instances_folder_exist)
instance = Instance.objects.all()[0]
instance_path = os.path.join(
instance_folder_path, 'uuid%s' % instance.uuid, 'submission.xml')
self.assertTrue(storage.exists(instance_path))
media_file = "1335783522563.jpg"
media_path = os.path.join(
instance_folder_path, 'uuid%s' % instance.uuid, media_file)
self.assertTrue(storage.exists(media_path))
def test_push(self):
with HTTMock(form_list_xml):
self.bc.download_xforms()
with HTTMock(instances_xml):
self.bc.download_instances(self.xform.id_string)
XForm.objects.all().delete()
xforms = XForm.objects.filter(
user=self.user, id_string=self.xform.id_string)
self.assertTrue(xforms.count() == 0)
instances = Instance.objects.filter(
xform__user=self.user, xform__id_string=self.xform.id_string)
self.assertTrue(instances.count() == 0)
self.bc.push()
xforms = XForm.objects.filter(
user=self.user, id_string=self.xform.id_string)
self.assertTrue(xforms.count() == 1)
instances = Instance.objects.filter(
xform__user=self.user, xform__id_string=self.xform.id_string)
self.assertTrue(instances.count() == 1)
def tearDown(self):
# remove media files
for username in ['bob', 'deno']:
delete_user_storage(username)
|
bsd-2-clause
| 5,599,805,507,777,540,000
| 37.098901
| 78
| 0.62792
| false
| 3.760304
| true
| false
| false
|
mmlab/eice
|
EiCGraphAlgo/core/typeahead.py
|
1
|
4270
|
'''
Created on 17-sep.-2012
@author: ldevocht
'''
import urllib.parse, lxml.objectify, logging, configparser, re, ujson, requests
from core.resourceretriever import Resourceretriever
from core import resourceretriever, config_search
config = resourceretriever.config
mappings = resourceretriever.mappings
logger = logging.getLogger('pathFinder')
lookup_server = config.get('services', 'lookup_index')
#lookup_solr = Solr(lookup_server)
class TypeAhead:
def __init__(self):
self.session = requests.session()
def dbPediaPrefix(self, prefix):
server = config.get('services', 'lookup')
gateway = '{0}/api/search.asmx/PrefixSearch?MaxHits=7&QueryString={1}'.format(server,prefix)
requestUrl = urllib.parse.quote(gateway, ':/=?<>"*&')
logger.debug('Request %s' % requestUrl)
#rq = grequests.get(requestUrl)
#response = grequests.map([rq])
#raw_output = response[0].content
#raw_output = urllib.request.urlopen(requestUrl,timeout=2).read()
#s = requests.Session()
#s.headers.update({'Connection': 'close'})
r = self.session.get(requestUrl)
#(s.headers)
#print(r.headers)
raw_output = r.content
root = lxml.objectify.fromstring(raw_output)
results = list()
if hasattr(root, 'Result'):
logger.debug('Found %s results' % len(root.Result))
for result in root.Result:
if prefix.lower() in result.Label[0].text.lower() and hasattr(result.Classes, 'Class'):
klasses = result.Classes.Class
if hasattr(klasses, 'Label'):
klasse = klasses
else:
klasse = klasses[0]
item = dict()
item['label'] = result.Label[0].text
item['category']=klasse.Label.text.capitalize()
item['uri']=result.URI[0].text
                    logger.debug('Fetching local hits for %s' % item['uri'])
local_hits = Resourceretriever().getResource(item['uri'].strip("<>"),False)
if local_hits:
logger.debug('Found %s hits' % len(local_hits))
n_hits = 0
if local_hits:
for triple in local_hits:
if local_hits[triple][1] not in config_search.blacklist:
n_hits += 1
if n_hits > 8:
results.append(item)
else:
logger.debug('Found nothing for prefix %s' % prefix)
return results
def prefix(self, prefix,lookup_server=lookup_server):
results = list()
if len(prefix) > 2:
logger.debug('looking up %s on dbpedia lookup' % prefix)
results += self.dbPediaPrefix(prefix)
logger.debug('looking up %s on local index' % prefix)
if config.has_option('services','lookup_index'):
#query={'q':'lookup:"{0}*"'.format(re.escape(prefix).lower()),'fl':'url label type','timeAllowed':'100','rows':'7'}
#response = lookup_solr.search(**query)
query = '%sselect?q=lookup:"%s*"&fl=url label type&wt=json' % (lookup_server,re.escape(prefix).lower())
rsp = self.session.get(query)
#response = grequests.map([rq])
response = ujson.decode(rsp.content)['response']
if len(response['docs']) > 0:
for doc in response['docs']:
item = dict()
item['category']=doc['type'].split(' ')[0].rsplit('/')[-1].rsplit('#')[-1].strip('<>".')
if item['category'] == 'Agent':
item['category'] = 'Author'
item['uri']=doc['url']
item['label']=(doc['label'].split('.')[0].split('"^^')[0]).strip('\" <>.')
results.append(item)
logger.debug('done finding matches for %s' % prefix)
return results
#print(TypeAhead().prefix('Selver'))
#print(TypeAhead().dbPediaPrefix('Selver'))
|
agpl-3.0
| -1,328,228,545,254,396,400
| 45.423913
| 131
| 0.52623
| false
| 4.14161
| true
| false
| false
|
gaeun/open-event-orga-server
|
app/api/helpers/utils.py
|
1
|
7209
|
import json
from hashlib import md5
from flask import request
from flask.ext.restplus import Resource as RestplusResource
from flask_restplus import Model, fields, reqparse
from app.helpers.data import update_version
from app.models.event import Event as EventModel
from .error_docs import (
notfound_error_model,
notauthorized_error_model,
validation_error_model,
invalidservice_error_model,
)
from .helpers import get_object_list, get_object_or_404, get_object_in_event, \
create_model, validate_payload, delete_model, update_model, \
handle_extra_payload, get_paginated_list, fix_attribute_names
DEFAULT_PAGE_START = 1
DEFAULT_PAGE_LIMIT = 20
POST_RESPONSES = {
400: ('Validation error', validation_error_model),
401: ('Authentication failure', notauthorized_error_model),
404: ('Event does not exist', notfound_error_model),
201: 'Resource created successfully'
}
PUT_RESPONSES = {
400: ('Validation Error', validation_error_model),
401: ('Authentication failure', notauthorized_error_model),
404: ('Object/Event not found', notfound_error_model)
}
SERVICE_RESPONSES = {
404: ('Service not found', notfound_error_model),
400: ('Service does not belong to event', invalidservice_error_model),
}
# Parameters for a paginated response
PAGE_PARAMS = {
'start': {
'description': 'Serial number to start from',
'type': int,
'default': DEFAULT_PAGE_START
},
'limit': {
'description': 'Limit on the number of results',
'type': int,
'default': DEFAULT_PAGE_LIMIT
},
}
# ETag Header (required=False by default)
ETAG_HEADER_DEFN = [
'If-None-Match', 'ETag saved by client for cached resource'
]
# Base Api Model for a paginated response
PAGINATED_MODEL = Model('PaginatedModel', {
'start': fields.Integer,
'limit': fields.Integer,
'count': fields.Integer,
'next': fields.String,
'previous': fields.String
})
# Custom Resource Class
class Resource(RestplusResource):
def dispatch_request(self, *args, **kwargs):
resp = super(Resource, self).dispatch_request(*args, **kwargs)
# ETag checking.
if request.method == 'GET':
old_etag = request.headers.get('If-None-Match', '')
# Generate hash
data = json.dumps(resp)
new_etag = md5(data).hexdigest()
if new_etag == old_etag:
# Resource has not changed
return '', 304
else:
# Resource has changed, send new ETag value
return resp, 200, {'ETag': new_etag}
elif request.method == 'POST':
# Grab just the response data
# Exclude status code and headers
resp_data = resp[0]
data = json.dumps(resp_data)
etag = md5(data).hexdigest()
# Add ETag to response headers
resp[2].update({'ETag': etag})
return resp
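    # Illustrative exchange with the ETag handling above (not part of the original file):
    #   GET /events/1                                    -> 200, body plus header ETag: "abc1"
    #   GET /events/1, If-None-Match: "abc1" (unchanged) -> 304, empty body
    #   POST /events                                     -> created, response carries its own ETag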
# Base class for Paginated Resource
class PaginatedResourceBase():
"""
Paginated Resource Helper class
This includes basic properties used in the class
"""
parser = reqparse.RequestParser()
parser.add_argument('start', type=int, default=DEFAULT_PAGE_START)
parser.add_argument('limit', type=int, default=DEFAULT_PAGE_LIMIT)
# DAO for Models
class BaseDAO:
"""
DAO for a basic independent model
"""
version_key = None
is_importing = False # temp key to set to True when an import operation is underway
def __init__(self, model, post_api_model=None, put_api_model=None):
self.model = model
self.post_api_model = post_api_model
self.put_api_model = put_api_model if put_api_model else post_api_model
def get(self, id_):
return get_object_or_404(self.model, id_)
def list(self, **kwargs):
return get_object_list(self.model, **kwargs)
def paginated_list(self, url=None, args={}, **kwargs):
return get_paginated_list(self.model, url=url, args=args, **kwargs)
def create(self, data, validate=True):
if validate:
data = self.validate(data, self.post_api_model)
item = create_model(self.model, data)
self.update_version(item.id)
return item
def update(self, id_, data, validate=True):
if validate:
data = self.validate_put(data, self.put_api_model)
item = update_model(self.model, id_, data)
self.update_version(id_)
return item
def delete(self, id_):
item = delete_model(self.model, id_)
self.update_version(id_)
return item
def validate(self, data, model=None, check_required=True):
if not model:
model = self.post_api_model
if model:
data = handle_extra_payload(data, model)
validate_payload(data, model, check_required=check_required)
data = fix_attribute_names(data, model)
return data
def validate_put(self, data, model=None):
"""
Abstraction over validate with check_required set to False
"""
return self.validate(data, model=model, check_required=False)
def update_version(self, event_id):
"""
Update version of the component of the event
"""
if self.version_key:
update_version(event_id, False, self.version_key)
# Helper functions
def _del(self, data, fields):
"""
Safe delete fields from payload
"""
data_copy = data.copy()
for field in fields:
if field in data:
del data_copy[field]
return data_copy
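    # Sketch of how a BaseDAO is typically wired up (illustrative names only;
    # SessionModel, SESSION_POST and SESSION are placeholders, not from this module):
    #   DAO = BaseDAO(SessionModel, post_api_model=SESSION_POST, put_api_model=SESSION)
    #   item = DAO.create(request.json)            # validated against SESSION_POST
    #   DAO.update(item.id, {'title': 'Updated'})  # PUT validation relaxes required checks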
# DAO for Service Models
class ServiceDAO(BaseDAO):
"""
Data Access Object for service models like microlocations,
    speakers and so on.
"""
def get(self, event_id, sid):
return get_object_in_event(self.model, sid, event_id)
def list(self, event_id, **kwargs):
# Check if an event with `event_id` exists
get_object_or_404(EventModel, event_id)
return get_object_list(self.model, event_id=event_id, **kwargs)
def paginated_list(self, url=None, args={}, **kwargs):
return get_paginated_list(self.model, url=url, args=args, **kwargs)
def create(self, event_id, data, url, validate=True):
if validate:
data = self.validate(data)
item = create_model(self.model, data, event_id=event_id)
self.update_version(event_id)
# Return created resource with a 201 status code and its Location
# (url) in the header.
resource_location = url + '/' + str(item.id)
return item, 201, {'Location': resource_location}
def update(self, event_id, service_id, data, validate=True):
if validate:
data = self.validate_put(data)
item = update_model(self.model, service_id, data, event_id)
self.update_version(event_id)
return item
def delete(self, event_id, service_id):
item = delete_model(self.model, service_id, event_id=event_id)
self.update_version(event_id)
return item
# store task results in case of testing
# state and info
TASK_RESULTS = {}
|
gpl-3.0
| 3,571,529,669,562,688,000
| 30.207792
| 88
| 0.625468
| false
| 3.80623
| false
| false
| false
|
wradlib/wradlib
|
wradlib/tests/test_adjust.py
|
1
|
7916
|
#!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import numpy as np
import pytest
from wradlib import adjust
class Data:
# Arguments to be used throughout all test classes
raw_x, raw_y = np.meshgrid(np.arange(4).astype("f4"), np.arange(4).astype("f4"))
raw_coords = np.vstack((raw_x.ravel(), raw_y.ravel())).T
obs_coords = np.array([[1.0, 1.0], [2.0, 1.0], [1.0, 3.5], [3.5, 3.0]])
raw = np.array(
[
[
1.0,
2.0,
1.0,
0.0,
1.0,
2.0,
1.0,
2.0,
1.0,
0.0,
0.0,
3.0,
4.0,
0.0,
4.0,
0.0,
],
[
1.0,
2.0,
1.0,
0.0,
1.0,
2.0,
1.0,
2.0,
1.0,
0.0,
0.0,
3.0,
4.0,
0.0,
4.0,
0.0,
],
]
).T
obs = np.array([[2.0, 3, 0.0, 4.0], [2.0, 3, 0.0, 4.0]]).T
nnear_raws = 2
mingages = 3
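# The fixtures above describe a 4 x 4 grid of raw (radar) pixels with two identical
# data columns and four gauge observations; the tests below check that each adjustment
# scheme reproduces precomputed reference fields on this toy setup.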
class TestAdjustBase(Data):
def test___init__(self):
pass
def test__checkip(self):
pass
def test__check_shape(self):
pass
def test___call__(self):
pass
def test__get_valid_pairs(self):
pass
def test_xvalidate(self):
pass
class TestAdjustAddTest(Data):
def test_AdjustAdd_1(self):
adj = adjust.AdjustAdd(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array(
[
[1.62818784, 1.62818784],
[2.75926679, 2.75926679],
[2.09428144, 2.09428144],
[1.1466651, 1.1466651],
[1.51948941, 1.51948941],
[2.5, 2.5],
[2.5, 2.5],
[3.27498305, 3.27498305],
[1.11382822, 1.11382822],
[0.33900645, 0.33900645],
[0.89999998, 0.89999998],
[4.52409637, 4.52409637],
[3.08139533, 3.08139533],
[0.0, 0.0],
[3.99180328, 3.99180328],
[2.16913891, 2.16913891],
]
)
assert np.allclose(res, shouldbe)
# test in case only one dataset is passed
res = adj(self.obs[:, 0], self.raw[:, 0])
assert np.allclose(res, shouldbe[:, 0])
class TestAdjustMultiplyTest(Data):
def test_AdjustMultiply_1(self):
adj = adjust.AdjustMultiply(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array(
[
[1.44937706, 1.44937706],
[3.04539442, 3.04539442],
[1.74463618, 1.74463618],
[0.0, 0.0],
[1.37804615, 1.37804615],
[2.66666675, 2.66666675],
[2.0, 2.0],
[3.74106812, 3.74106812],
[1.17057478, 1.17057478],
[0.0, 0.0],
[0.0, 0.0],
[6.14457822, 6.14457822],
[2.43439031, 2.43439031],
[0.0, 0.0],
[4.60765028, 4.60765028],
[0.0, 0.0],
]
)
assert np.allclose(res, shouldbe)
# test in case only one dataset is passed
res = adj(self.obs[:, 0], self.raw[:, 0])
assert np.allclose(res, shouldbe[:, 0])
class TestAdjustMixed(Data):
def test_AdjustMixed_1(self):
adj = adjust.AdjustMixed(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array(
[
[1.51427719, 1.51427719],
[2.95735525, 2.95735525],
[1.85710269, 1.85710269],
[0.36806121, 0.36806121],
[1.43181512, 1.43181512],
[2.61538471, 2.61538471],
[2.15384617, 2.15384617],
[3.59765723, 3.59765723],
[1.18370627, 1.18370627],
[0.15027952, 0.15027952],
[0.30825174, 0.30825174],
[5.63558862, 5.63558862],
[2.49066845, 2.49066845],
[-0.29200733, -0.29200733],
[4.31646909, 4.31646909],
[0.67854041, 0.67854041],
]
)
assert np.allclose(res, shouldbe)
# test in case only one dataset is passed
res = adj(self.obs[:, 0], self.raw[:, 0])
assert np.allclose(res, shouldbe[:, 0])
class TestAdjustMFB(Data):
raw_coords = np.array([[0.0, 0.0], [1.0, 1.0]])
obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
raw = np.array([2.0, 2.0])
obs = np.array([4.0, 4.0])
mingages = 0
mfb_args = dict(method="mean")
def test_AdjustMFB_1(self):
adj = adjust.AdjustMFB(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
mfb_args=self.mfb_args,
)
res = adj(self.obs, self.raw)
shouldbe = np.array([4.0, 4.0])
assert np.allclose(res, shouldbe)
adj = adjust.AdjustMFB(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
mfb_args=dict(method="median"),
)
adj(self.obs, self.raw)
adj = adjust.AdjustMFB(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
mfb_args=dict(method="linregr", minslope=1.0, minr="0.7", maxp=0.5),
)
adj(self.obs, self.raw)
class TestAdjustNone(Data):
raw_coords = np.array([[0.0, 0.0], [1.0, 1.0]])
obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
raw = np.array([2.0, 2.0])
obs = np.array([4.0, 4.0])
mingages = 0
mfb_args = dict(method="mean")
def test_AdjustNone_1(self):
adj = adjust.AdjustNone(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array([2.0, 2.0])
assert np.allclose(res, shouldbe)
class TestGageOnly(Data):
raw_coords = np.array([[0.0, 0.0], [1.0, 1.0]])
obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
raw = np.array([2.0, 2.0])
obs = np.array([4.0, 4.0])
mingages = 0
mfb_args = dict(method="mean")
def test_GageOnly_1(self):
adj = adjust.GageOnly(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array([4.0, 4.0])
assert np.allclose(res, shouldbe)
class TestAdjustHelper:
def test__get_neighbours_ix(self):
pass
def test__get_statfunc(self):
adjust._get_statfunc("median")
adjust._get_statfunc("best")
with pytest.raises(NameError):
adjust._get_statfunc("wradlib")
def test_best(self):
x = 7.5
y = np.array([0.0, 1.0, 0.0, 1.0, 0.0, 7.7, 8.0, 8.0, 8.0, 8.0])
assert adjust.best(x, y) == 7.7
|
mit
| 893,179,589,196,588,400
| 27.47482
| 84
| 0.456544
| false
| 3.07776
| true
| false
| false
|
maxivanoff/fftoolbox-app
|
q/fftoolbox/multipole.py
|
1
|
12213
|
import logging
import numpy as np
from copy import deepcopy
from numpy.linalg import norm
from scipy.special import sph_harm as Y
mult_logger = logging.getLogger('multipole')
def Rlm(l, m, r, theta, phi):
return r**l * np.sqrt(4 * np.pi / (2 * l + 1)) * Y(m, l, theta, phi)
def Rlmc(l, m, r, theta, phi):
return r**l * np.sqrt(4 * np.pi / (2 * l + 1)) * Ylmc(l, m, theta, phi)
def Rlms(l, m, r, theta, phi):
return r**l * np.sqrt(4 * np.pi / (2 * l + 1)) * Ylms(l, m, theta, phi)
def Ylmc(l, m, theta, phi):
#v = np.sqrt(0.5) * (np.conj(Y(m, l, theta, phi)) + Y(m, l, theta, phi))
v = np.sqrt(0.5) * (Y(-m, l, theta, phi) + (-1)**m*Y(m, l, theta, phi))
#v = np.sqrt(0.5) * ((-1)**m*Y(-m, l, theta, phi) + Y(m, l, theta, phi))
if abs(v.imag) > 0.0001: raise ValueError("Non-zero imaginary part in Ylmc")
return v.real
def Ylms(l, m, theta, phi):
#v = 1j * np.sqrt(0.5) * (np.conj(Y(m, l, theta, phi)) - Y(m, l, theta, phi))
#v = 1j * np.sqrt(0.5) * (Y(-m, l, theta, phi) - (-1)**m*Y(m, l, theta, phi))
v = 1j * np.sqrt(0.5) * (-(-1)**m*Y(-m, l, theta, phi) + Y(m, l, theta, phi))
if abs(v.imag) > 0.0001: raise ValueError("Non-zero imaginary part in Ylms")
return v.real
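# Quick consistency check for the real harmonics above (illustrative sketch, not part
# of the original module): for m > 0 the real cosine/sine combinations satisfy
#   Ylmc(l, m, theta, phi)**2 + Ylms(l, m, theta, phi)**2 == 2 * abs(Y(m, l, theta, phi))**2
# up to floating point error, which is a handy sanity test when changing conventions.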
class GroupOfAtoms(object):
def __init__(self, name=None):
self.name = name
self.atoms = list()
self.i = -1
    def build_Pymol_rep(self, filename, vmax=1., r_sphere=0.2):
        # Write a PyMOL CGO script that draws each site as a sphere coloured by its charge.
        s = 'from pymol.cgo import *\nfrom pymol import cmd\nobj = [ BEGIN, LINES, ]\n'
        for site in self.sites:
            q = self.molecule.ff.charges[site.name]
            if q is None:
                s_color = 'x = 0.0\ncolor = [COLOR, 1-x, 1-x, 1]\n'
            elif q >= 0:
                s_color = 'x = %f\ncolor = [COLOR, 1, 1-x, 1-x]\n' % (q/vmax)
            else:
                s_color = 'x = %f\ncolor = [COLOR, 1-x, 1-x, 1]\n' % (-q/vmax)
            s_sphere = 'sphere = [ SPHERE, %f, %f, %f,%f]\n' % (site.x, site.y, site.z, r_sphere)
            s = s + s_color + s_sphere + 'obj += color+sphere\n'
        s = s + 'obj.append(END)\ncmd.load_cgo(obj,"cgo01")\n'
        outfile = open(filename, 'w')
        outfile.write(s)
        outfile.close()
def set_sym_sites(self):
sites = {}
self.sym_sites = []
for i, name in enumerate(self.sites_names_eq):
if not name in sites:
sites[name] = i
self.sym_sites.append(sites[name])
def get_coordinates(self):
crds = np.zeros((len(self.sites), 3))
for i, s in enumerate(self.sites):
crds[i][:] = s.coordinates[:]
return crds
def get_sites(self, name):
return filter(lambda s: s.name==name, self.sites)
def get_atoms_by_element(self, element):
return filter(lambda a: a.element==element, self.atoms)
def get_atom(self, index):
return next(a for a in self.atoms if a.index==index)
@property
def atoms_names_noneq(self):
return [a.name for a in self.atoms_noneq]
@property
def atoms_names_eq(self):
return [a.name for a in self.atoms]
@property
def sites_names_noneq(self):
return [s.name for s in self.sites_noneq]
@property
def sites_names(self):
return self.sites_names_noneq
@property
def sites_names_eq(self):
return [s.name for s in self.sites]
@property
def sites(self):
sites = []
for atom in self:
sites += atom.sites
return sites
@property
def sites_noneq(self):
sites = []
for s in self.sites:
if not s.name in [ss.name for ss in sites]:
sites.append(s)
return sites
@property
def atoms_noneq(self):
atoms = []
for a in self.atoms:
if not a.name in [aa.name for aa in atoms]:
atoms.append(a)
return atoms
def __iter__(self):
return self
def next(self):
if self.i < len(self.atoms)-1:
self.i += 1
return self.atoms[self.i]
else:
self.i = -1
raise StopIteration
class Multipole(GroupOfAtoms):
"""
This is Multipole
"""
def __init__(self, name=None, origin=None):
GroupOfAtoms.__init__(self, name)
self.origin = origin
def set_multipole_matrix(self, multipoles=('cartesian', 2)):
if multipoles[0] == 'cartesian':
multipole = Cartesian(multipoles[1], self.get_coordinates(), self.sym_sites, self.origin)
elif multipoles[0] == 'spherical':
multipole = Spherical(multipoles[1], self.get_coordinates(), self.sym_sites, self.origin)
self.l = multipoles[1]
self.multipoles_names = multipole.names
self.QtoM = multipole.rotation_matrix_direct
self.QtoM_normed = np.zeros(self.QtoM.shape)
for i, u in enumerate(self.QtoM):
self.QtoM_normed[i,:] = u/np.linalg.norm(u)
self.MtoQ = multipole.rotation_matrix_inverse
def charges_to_multipoles(self, charges):
Q = np.array([])
for name in self.sites_names_noneq:
Q = np.append(Q, charges[name])
M = np.dot(self.QtoM, Q)
multipoles = {}
for multipole, m_value in zip(self.multipoles_names, M):
multipoles[multipole] = m_value
return multipoles
def multipoles_to_charges(self, multipoles):
if self.MtoQ is None:
raise ValueError('Cannot convert multipoles to charges')
M = np.array([])
for multipole in self.multipoles_names:
M = np.append(M, multipoles[multipole])
Q = np.dot(self.MtoQ, M)
charges = {}
for name, q_value in zip(self.sites_names_noneq, Q):
charges[name] = q_value
return charges
class MultipoleMatrix(object):
def __init__(self, sym_sites=None, formula=None):
# build matrix
rotation_matrix = np.zeros((len(self.names), len(sym_sites)))
for i, m_name in enumerate(self.names):
rotation_matrix[i][:] = formula.u(m_name).real
# reduce matrix
self.rotation_matrix_direct = np.zeros((len(self.names), max(sym_sites)+1))
for i, _ in enumerate(self.names):
self.rotation_matrix_direct[i] = np.bincount(sym_sites, weights=rotation_matrix[i])
try:
self.rotation_matrix_inverse = np.linalg.inv(self.rotation_matrix_direct)
except np.linalg.LinAlgError:
self.rotation_matrix_inverse = None
mult_logger.debug("Multipole conversion matrix is set up.\nmultipoles = %s; total number of components: %i \nQ to M matrix: %s" % (self.names, len(self.names), self.rotation_matrix_direct.shape))
class Spherical(MultipoleMatrix):
def __init__(self, l=None, coordinates=None, sym_sites=None, origin=None):
try:
self.names = []
for ll in xrange(l):
for mm in xrange(ll+1):
if mm==0:
self.names.append('%i%i' % (ll, mm))
else:
self.names.append('%i%ic' % (ll, mm))
self.names.append('%i%is' % (ll, mm))
except TypeError:
self.names = l
#cartesian to spherical (r, theta, phi) = (r, azimuth, polar)
def arctan(a,b):
if a==b==0:
return 0.
if b==0:
return (-1)*np.pi*np.sign(a)/2
else:
return np.arctan(a/b)
spherical = np.zeros(coordinates.shape)
x, y, z = coordinates[:,0], coordinates[:,1], coordinates[:,2]
#r = np.sqrt(x**2 + y**2 + z**2)
#phi = np.arccos(z/r)
#theta = np.array([])
#for xx, yy in zip(x,y):
# if yy>=0 and xx>0:
# s = 0
# if xx<=0:
# s = np.pi
# if xx>0 and yy<0:
# s = 2*np.pi
# if xx==0 and yy==0:
# s = 0
# theta = np.append(theta, arctan(yy,xx) + s)
#spherical[:,0] = r
#spherical[:,1] = theta
#spherical[:,2] = phi
xy2 = x**2 + y**2 # x2 + y2
spherical[:,0] = np.sqrt(xy2 + z**2) # r2 = x2 + y2 + z2
spherical[:,1] = np.arctan2(y, x) # theta = arctan(y/x)
spherical[:,2] = np.arctan2(np.sqrt(xy2), z) # phi = arctan(xy/z)
formula = SphericalFormulas(spherical, origin)
MultipoleMatrix.__init__(self, sym_sites, formula)
class Cartesian(MultipoleMatrix):
def __init__(self, l=None, coordinates=None, sym_sites=None, origin=None):
self.names = []
for i in xrange(l+1):
self.names += self.l_to_names(i)
formula = CartesianFormulas(coordinates, origin)
MultipoleMatrix.__init__(self, sym_sites, formula)
def l_to_names(self, l):
if l == 0: return ['charge']
if l == 1: return 'X Y Z'.split()
if l == 2: return 'XX YY ZZ XY XZ YZ'.split()
class Formulas(dict):
def __init__(self, coordinates=None, origin=None):
self.coordinates = coordinates
if origin == None:
self.origin = np.zeros(3)
else:
self.origin = origin
dict.__init__(self)
class SphericalFormulas(Formulas):
def __init__(self, coordinates=None, origin=None):
Formulas.__init__(self, coordinates, origin)
self[0] = Rlm
self['c'] = Rlmc
self['s'] = Rlms
def u(self, m_name):
l, m = [int(t) for t in m_name[:2]]
try:
x = m_name[2]
except IndexError:
x = 0
u = np.array([])
for crds in self.coordinates:
r, theta, phi = crds
u = np.append(u, self[x](l, m, r, theta, phi))
return u
class CartesianFormulas(Formulas):
def __init__(self, coordinates=None, origin=None):
Formulas.__init__(self, coordinates, origin)
self[0] = self.total_charge
self[1] = self.dipole
self[2] = self.quadrupole
self[3] = self.hexadecapole
def name_to_num(self, m_name):
def convert(a):
if a == 'X': return 0
if a == 'Y': return 1
if a == 'Z': return 2
if m_name == 'charge':
return
else:
return [convert(a) for a in m_name]
def u(self, m_name):
components = self.name_to_num(m_name)
if m_name == 'charge': c = 0
else: c = len(m_name)
u = np.array([])
for crds in self.coordinates:
u = np.append(u, self[c](crds, components))
return u
def total_charge(self, crds, components):
return 1.
def dipole(self, crds, components):
c = components[0]
return crds[c] - self.origin[c]
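    # The remaining components follow the traceless Cartesian conventions about
    # self.origin, e.g. the quadrupole below evaluates (3*a_m*a_n - a**2*delta_mn) / 2
    # where a is the site position measured from the origin.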
def quadrupole(self, crds, components):
a2 = np.sum(crds**2)
m, n = components
am = crds[m] - self.origin[m]
an = crds[n] - self.origin[n]
return 3.0 / 2.0 * am * an - 0.5 * a2 * self.delta(m,n)
def octapole(self, crds, components):
m, n, k = components
a2 = np.sum(crds**2)
am = crds[m] - self.origin[m]
an = crds[n] - self.origin[n]
ak = crds[k] - self.origin[k]
        return 5. / 2. * am * an * ak - 0.5 * a2 * (am * self.delta(n,k) + an * self.delta(m,k) + ak * self.delta(m,n))
def hexadecapole(self, crds, components):
        m, n, k, l = components
        a2 = np.sum(crds**2)
am = crds[m] - self.origin[m]
an = crds[n] - self.origin[n]
ak = crds[k] - self.origin[k]
al = crds[l] - self.origin[l]
return 1. / (1. * 2. * 3. * 4.) * (105. * am * an * ak * al - 15. * a2 * (am * an * self.delta(k,l) + am * ak * self.delta(n,l) + am * al * self.delta(n,k) + an * ak * self.delta(m,l) + an * al * self.delta(m,k) + ak * al * self.delta(m,n)) + 3. * a2**2 * (self.delta(m,n) * self.delta(k,l) + self.delta(m,k) * self.delta(n,l) + self.delta(m,l) * self.delta(n,k)))
def delta(self, i, j):
if i==j: return 1
else: return 0
|
gpl-2.0
| 6,099,882,326,194,920,000
| 33.794872
| 372
| 0.524032
| false
| 3.106843
| false
| false
| false
|
woobe/h2o
|
py/testdir_multi_jvm/test_rf_1ktrees_job_cancel_many_fvec.py
|
1
|
2219
|
import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_jobs, h2o_rf
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(3)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_1ktrees_job_cancel_many_fvec(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
# always match the run below!
# just using one file for now
for x in [1000]:
shCmdString = "perl " + h2o.find_file("syn_scripts/parity.pl") + " 128 4 "+ str(x) + " quad " + SYNDATASETS_DIR
h2o.spawn_cmd_and_wait('parity.pl', shCmdString.split(),4)
csvFilename = "parity_128_4_" + str(x) + "_quad.data"
csvFilename = "parity_128_4_" + str(1000) + "_quad.data"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
hex_key = csvFilename + ".hex"
parseResult = h2o_cmd.parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30)
print "kick off jobs, then cancel them"
for trial in range (1,5):
# random 0 or 1 delay
delay = random.uniform(0,1)
time.sleep(delay)
h2o.verboseprint("Trial", trial)
start = time.time()
h2o_cmd.runRF(parseResult=parseResult, trees=trial, max_depth=50, rfView=False, noPoll=True, timeoutSecs=30, retryDelaySecs=0.25)
print "RF #", trial, "started on ", csvFilename, 'took', time.time() - start, 'seconds'
### h2o_jobs.cancelAllJobs(timeoutSecs=10)
h2o.check_sandbox_for_errors()
# do one last good one
rfView = h2o_cmd.runRF(parseResult=parseResult, trees=trial, max_depth=50, timeoutSecs=600, retryDelaySecs=3)
(classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView, ntree=trial)
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
| -7,085,951,831,473,397,000
| 37.258621
| 141
| 0.604777
| false
| 3.143059
| false
| false
| false
|
juliosmelo/soldo
|
utils/settings.py
|
1
|
1331
|
PGS_TOKEN = 'C888EE7F420841CF92D0B0063EDDFC7D'
PGS_EMAIL = 'pagseguro@panfleteria.com.br'
# from datetime import datetime
# from datetime import date
# from datetime import timedelta
# dates = [d0]
# dates_two = list()
# def date_paginator(x, y):
# print x, y
# if pages == 1 and pages_mods == 0:
# _date = d0 + timedelta(days=30)
# date_paginator(d0, _date)
# else:
# for i in range(pages):
# _date = d0 + timedelta(days=30 * (i + 1))
# dates.append(_date)
# if pages_mods > 0 and pages_mods < 30:
# new_date = dates[-1:][0] + timedelta(days=pages_mods)
# dates.append(new_date)
# if dates:
# for i in range(len(dates) - 1):
# date_paginator(dates[i], dates[i + 1])
# class DateRangePagination:
# """docstring for DateRangePagination"""
# def __init__(self, initial_date):
# self.initial_date = datetime.strptime(initial_date, "%Y-%m-%d").date()
# self.dates = [self.initial_date]
# self.date_limit = datetime.now().date()
# def get_ranges(self):
# print self.initial_date
# def set_ranges():
# d0 = date(2008, 8, 18)
# d1 = date(2008, 11, 18)
# delta = d1 - d0
# pages = delta.days / 30
# pages_mods = delta.days % 30
# pass
# def get_days(self,):
# pass
|
mit
| 1,225,591,867,755,594,800
| 23.666667
| 80
| 0.574005
| false
| 2.819915
| false
| false
| false
|
MichaelAnckaert/Hermes
|
message.py
|
1
|
1631
|
"""Message functionality for Hermes"""
from datetime import datetime
import json
__author__ = "Michael Anckaert"
__copyright__ = "Copyright 2012, Michael Anckaert"
__credits__ = ["Michael Anckaert"]
__license__ = "GPLv3"
__version__ = "0.0.1"
__maintainer__ = "Michael Anckaert"
__email__ = "michael.anckaert@sinax.be"
__status__ = "Development"
class MessageType(object):
types = {}
def __init__(self, name):
if name in MessageType.types:
print " W: Message type '{0}' already exists".format(name)
raise ValueError("Message type '{}' already exists.".format(name))
self.name = name
self.rest_enabled = False
        MessageType.types[name] = self
def enable_rest(self):
self.rest_enabled = True
def disable_rest(self):
self.rest_enabled = False
    @staticmethod
    def get_message_type(name):
        if name in MessageType.types:
            return MessageType.types[name]
        else:
            return None
class Message(object):
def __init__(self, type, content):
if MessageType.get_message_type(type):
self.type = type
self.content = content
self.id = None
self.status = "UNKNOWN"
self.received = datetime.now().strftime("%d-%m-%Y %H:%M")
self.response = None
return
print " W: Unknown message type '{0}' ".format(type)
raise ValueError("Wrong message type!")
def __str__(self):
return json.dumps({'message': {'id': self.id, 'status': self.status, 'received': self.received, 'response': self.response}})
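# Minimal usage sketch (illustrative only; relies on the registry behaviour above):
#   MessageType("order")
#   msg = Message("order", '{"item": 42}')
#   print msg   # -> {"message": {"id": null, "status": "UNKNOWN", ...}}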
|
gpl-3.0
| 2,294,136,335,230,464,300
| 27.12069
| 132
| 0.591048
| false
| 3.828638
| false
| false
| false
|