| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
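Each row below pairs repository metadata with a source file split into `prefix`, `middle`, and `suffix` cells, i.e. a fill-in-the-middle (FIM) style sample. A minimal sketch of how such a row might be consumed follows; the helper names, sentinel tokens, and the toy split point are illustrative assumptions, not part of this dataset's specification.

```python
# Minimal sketch, assuming rows are plain dicts with the columns listed above.
# The sentinel tokens are illustrative; real FIM setups define their own.
def reassemble(row):
    """Recover the original file text from one prefix/middle/suffix row."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_string(row, pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
    """Lay out a row so a model sees prefix and suffix, then predicts the middle."""
    return f'{pre}{row["prefix"]}{suf}{row["suffix"]}{mid}{row["middle"]}'

# Toy row built from the first sample below (the split point is made up).
row = {
    "repo_name": "jeremiahyan/odoo",
    "path": "addons/website/models/ir_module_module.py",
    "language": "Python",
    "license": "gpl-3.0",
    "prefix": "import logging\n",
    "middle": "import os\n",
    "suffix": "from collections import OrderedDict\n",
}
assert reassemble(row).count("\n") == 3
print(to_fim_string(row))
```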
| jeremiahyan/odoo | addons/website/models/ir_module_module.py | Python | gpl-3.0 | 19,278 | 0.004305 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import os
from collections import OrderedDict
from odoo import api, fields, models
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
from odoo.exceptions import MissingError
from odoo.http import request
_logger = logging.getLogger(__name__)
class IrModuleModule(models.Model):
_name = "ir.module.module"
_description = 'Module'
_inherit = _name
# The order is important because of dependencies (pages need views, menus need pages)
_theme_model_names = OrderedDict([
('ir.ui.view', 'theme.ir.ui.view'),
('ir.asset', 'theme.ir.asset'),
('website.page', 'theme.website.page'),
('website.menu', 'theme.website.menu'),
('ir.attachment', 'theme.ir.attachment'),
])
_theme_translated_fields = {
'theme.ir.ui.view': [('theme.ir.ui.view,arch', 'ir.ui.view,arch_db')],
'theme.website.menu': [('theme.website.menu,name', 'website.menu,name')],
}
image_ids = fields.One2many('ir.attachment', 'res_id',
domain=[('res_model', '=', _name), ('mimetype', '=like', 'image/%')],
string='Screenshots', readonly=True)
# for kanban view
is_installed_on_current_website = fields.Boolean(compute='_compute_is_installed_on_current_website')
def _compute_is_installed_on_current_website(self):
"""
Compute for every theme in ``self`` if the current website is using it or not.
This method does not take dependencies into account, because if it did, it would show
the current website as having multiple different themes installed at the same time,
which would be confusing for the user.
"""
for module in self:
module.is_installed_on_current_website = module == self.env['website'].get_current_website().theme_id
def write(self, vals):
"""
Override to correctly upgrade themes after upgrade/installation of modules.
# Install
If this theme wasn't installed before, then load it for every website
for which it is in the stream.
eg. The very first installation of a theme on a website will trigger this.
eg. If a website uses theme_A and we install sale, then theme_A_sale will be
autoinstalled, and in this case we need to load theme_A_sale for the website.
# Upgrade
There are 2 cases to handle when upgrading a theme:
* When clicking on the theme upgrade button on the interface,
in which case there will be an http request made.
-> We want to upgrade the current website only, not any other.
* When upgrading with -u, in which case no request should be set.
-> We want to upgrade every website using this theme.
"""
for module in self:
if module.name.startswith('theme_') and vals.get('state') == 'installed':
_logger.info('Module %s has been loaded as theme template (%s)' % (module.name, module.state))
if module.state in ['to install', 'to upgrade']:
websites_to_update = module._theme_get_stream_website_ids()
if module.state == 'to upgrade' and request:
Website = self.env['website']
current_website = Website.get_current_website()
websites_to_update = current_website if current_website in websites_to_update else Website
for website in websites_to_update:
module._theme_load(website)
return super(IrModuleModule, self).write(vals)
def _get_module_data(self, model_name):
"""
Return every theme template model of type ``model_name`` for every theme in ``self``.
:param model_name: string with the technical name of the model for which to get data.
(the name must be one of the keys present in ``_theme_model_names``)
:return: recordset of theme template models (of type defined by ``model_name``)
"""
theme_model_name = self._theme_model_names[model_name]
IrModelData = self.env['ir.model.data']
records = self.env[theme_model_name]
for module in self:
imd_ids = IrModelData.search([('module', '=', module.name), ('model', '=', theme_model_name)]).mapped('res_id')
records |= self.env[theme_model_name].with_context(active_test=False).browse(imd_ids)
return records
def _update_records(self, model_name, website):
"""
This method:
- Finds and updates existing records.
For each model, overwrite the fields that are defined in the template (except a few
cases such as active) but keep inherited models so customizations are not lost.
- Creates new records from templates for those that didn't exist.
- Removes the records that existed before but are no longer in the template.
See _theme_cleanup for more information.
There is a special 'while' loop around the 'for' so that models with unmet dependencies
can be queued back at the end of the iteration. Hopefully the dependency will be found
after all models have been processed; if not, an error message is shown.
:param model_name: string with the technical name of the model to handle
(the name must be one of the keys present in ``_theme_model_names``)
:param website: ``website`` model for which the records have to be updated
:raise MissingError: if there is a missing dependency.
"""
self.ensure_one()
remaining = self._get_module_data(model_name)
last_len = -1
while (len(remaining) != last_len):
last_len = len(remaining)
for rec in remaining:
rec_data = rec._convert_to_base_model(website)
if not rec_data:
_logger.info('Record queued: %s' % rec.display_name)
continue
find = rec.with_context(active_test=False).mapped('copy_ids').filtered(lambda m: m.website_id == website)
# special case for attachment
# if module B overrides an attachment from dependency A, we update it
if not find and model_name == 'ir.attachment':
find = rec.copy_ids.search([('key', '=', rec.key), ('website_id', '=', website.id)])
if find:
imd = self.env['ir.model.data'].search([('model', '=', find._name), ('res_id', '=', find.id)])
if imd and imd.noupdate:
_logger.info('Noupdate set for %s (%s)' % (find, imd))
else:
# at update, ignore active field
if 'active' in rec_data:
rec_data.pop('active')
if model_name == 'ir.ui.view' and (find.arch_updated or find.arch == rec_data['arch']):
rec_data.pop('arch')
find.update(rec_data)
self._post_copy(rec, find)
else:
new_rec = self.env[model_name].create(rec_data)
self._post_copy(rec, new_rec)
remaining -= rec
if len(remaining):
error = 'Error - Remaining: %s' % remaining.mapped('display_name')
_logger.error(error)
raise MissingError(error)
self._theme_cleanup(model_name, website)
def _post_copy(self, old_rec, new_rec):
self.ensure_one()
translated_fields = self._theme_translated_fields.get(old_rec._name, [])
for (src_field, dst_field) in translated_fields:
self._cr.execute("""INSERT INTO ir_translation (lang
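The `_update_records` method above hinges on a make-progress-or-fail loop: records whose dependencies have not been created yet are skipped and retried on the next pass, and the loop stops as soon as a full pass removes nothing, at which point anything left over is reported as a missing dependency. Below is a standalone sketch of that pattern; the item and dependency shapes are illustrative assumptions, not Odoo's actual API.

```python
# Sketch of the re-queue loop used by _update_records: iterate until a pass
# makes no progress; whatever remains has an unmet dependency.
def process_with_requeue(items, is_ready, handle):
    remaining = list(items)
    last_len = -1
    while len(remaining) != last_len:          # stop once a pass removes nothing
        last_len = len(remaining)
        for item in list(remaining):
            if not is_ready(item):             # unmet dependency: queue it back
                continue
            handle(item)
            remaining.remove(item)
    if remaining:
        raise RuntimeError("unresolved dependencies: %r" % (remaining,))

# Toy usage: 'b' depends on 'a', so it is only handled on the second pass.
done = set()
process_with_requeue(["b", "a"],
                     is_ready=lambda x: x != "b" or "a" in done,
                     handle=done.add)
assert done == {"a", "b"}
```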
| fenderglass/Nano-Align | scripts/flip-blockades.py | Python | bsd-2-clause | 2,322 | 0.004307 |
#!/usr/bin/env python2.7
#(c) 2015-2016 by Authors
#This file is a part of Nano-Align program.
#Released under the BSD license (see LICENSE file)
"""
Flips blockade signals according to the protein's AA order
"""
from __future__ import print_function
import sys
import os
nanoalign_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, nanoalign_root)
from nanoalign.identifier import Identifier
from nanoalign.blockade import read_mat, write_mat
import nanoalign.signal_proc as sp
from nanoalign.model_loader import load_model
def flip(blockades, model_file):
"""
Flips blockades
"""
blockade_model = load_model(model_file)
identifier = Identifier(blockade_model)
peptide = blockades[0].peptide
clusters = sp.preprocess_blockades(blockades, cluster_size=1,
min_dwell=0.0, max_dwell=1000)
print("Num\tFwd_dst\tRev_dst\t\tNeeds_flip", file=sys.stderr)
num_reversed = 0
new_blockades = []
for num, cluster in enumerate(clusters):
discr_signal = sp.discretize(cluster.consensus, len(peptide))
fwd_dist = identifier.signal_protein_distance(discr_signal, peptide)
rev_dist = identifier.signal_protein_distance(discr_signal,
peptide[::-1])
print("{0}\t{1:5.2f}\t{2:5.2f}\t\t{3}"
.format(num + 1, fwd_dist, rev_dist, fwd_dist > rev_dist),
file=sys.stderr)
new_blockades.append(cluster.blockades[0])
if fwd_dist > rev_dist:
new_blockades[-1].eventTrace = new_blockades[-1].eventTrace[::-1]
num_reversed += 1
print("Reversed:", num_reversed, "of", len(blockades), file=sys.stderr)
return new_blockades
def main():
if len(sys.argv) != 4:
print("usage: flip-blockades.py blockades_in model_file flipped_out\n\n"
"Orients blockade signals according to the AA order "
"in the protein of origin")
return 1
blockades_in = sys.argv[1]
blockades_out = sys.argv[3]
svr_file = sys.argv[2]
blockades = read_mat(blockades_in)
rev_blockades = flip(blockades, svr_file)
write_mat(rev_blockades, blockades_out)
return 0
if __name__ == "__main__":
sys.exit(main())
| bataeves/kaggle | instacart/imba/arboretum_submition.py | Python | unlicense | 15,544 | 0.005597 |
import gc
import pandas as pd
import numpy as np
import os
import arboretum
import json
import sklearn.metrics
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from scipy.sparse import dok_matrix, coo_matrix
from sklearn.utils.multiclass import type_of_target
if __name__ == '__main__':
path = "data"
aisles = pd.read_csv(os.path.join(path, "aisles.csv"), dtype={'aisle_id': np.uint8, 'aisle': 'category'})
departments = pd.read_csv(os.path.join(path, "departments.csv"),
dtype={'department_id': np.uint8, 'department': 'category'})
order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
order_train = pd.read_csv(os.path.join(path, "order_products__train.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id': np.uint32,
'user_id': np.uint32,
'eval_set': 'category',
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8
})
products = pd.read_csv(os.path.join(path, "products.csv"), dtype={'product_id': np.uint16,
'aisle_id': np.uint8,
'department_id': np.uint8})
product_embeddings = pd.read_pickle('data/product_embeddings.pkl')
embedings = list(range(32))
product_embeddings = product_embeddings[embedings + ['product_id']]
order_train = pd.read_pickle(os.path.join(path, 'chunk_0.pkl'))
order_test = order_train.loc[order_train.eval_set == "test", ['order_id', 'product_id']]
order_train = order_train.loc[order_train.eval_set == "train", ['order_id', 'product_id', 'reordered']]
product_periods = pd.read_pickle(os.path.join(path, 'product_periods_stat.pkl')).fillna(9999)
print(order_train.columns)
###########################
prob = pd.merge(order_prior, orders, on='order_id')
print(prob.columns)
prob = prob.groupby(['product_id', 'user_id'])\
.agg({'reordered':'sum', 'user_id': 'size'})
print(prob.columns)
prob.rename(columns={'sum': 'reordered',
'user_id': 'total'}, inplace=True)
prob.reordered = (prob.reordered > 0).astype(np.float32)
prob.total = (prob.total > 0).astype(np.float32)
prob['reorder_prob'] = prob.reordered / prob.total
prob = prob.groupby('product_id').agg({'reorder_prob': 'mean'}).rename(columns={'mean': 'reorder_prob'})\
.reset_index()
prod_stat = order_prior.groupby('product_id').agg({'reordered': ['sum', 'size'],
'add_to_cart_order':'mean'})
prod_stat.columns = prod_stat.columns.levels[1]
prod_stat.rename(columns={'sum':'prod_reorders',
'size':'prod_orders',
'mean': 'prod_add_to_card_mean'}, inplace=True)
prod_stat.reset_index(inplace=True)
prod_stat['reorder_ration'] = prod_stat['prod_reorders'] / prod_stat['prod_orders']
prod_stat = pd.merge(prod_stat, prob, on='product_id')
# prod_stat.drop(['prod_reorders'], axis=1, inplace=True)
user_stat = orders.loc[orders.eval_set == 'prior', :].groupby('user_id').agg({'order_number': 'max',
'days_since_prior_order': ['sum',
'mean',
'median']})
user_stat.columns = user_stat.columns.droplevel(0)
user_stat.rename(columns={'max': 'user_orders',
'sum': 'user_order_starts_at',
'mean': 'user_mean_days_since_prior',
'median': 'user_median_days_since_prior'}, inplace=True)
user_stat.reset_index(inplace=True)
orders_products = pd.merge(orders, order_prior, on="order_id")
user_order_stat = orders_products.groupby('user_id').agg({'user_id': 'size',
'reordered': 'sum',
"product_id": lambda x: x.nunique()})
user_order_stat.rename(columns={'user_id': 'user_total_products',
'product_id': 'user_distinct_products',
'reordered': 'user_reorder_ratio'}, inplace=True)
user_order_stat.reset_index(inplace=True)
user_order_stat.user_reorder_ratio = user_order_stat.user_reorder_ratio / user_order_stat.user_total_products
user_stat = pd.merge(user_stat, user_order_stat, on='user_id')
user_stat['user_average_basket'] = user_stat.user_total_products / user_stat.user_orders
########################### products
prod_usr = orders_products.groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr.rename(columns={'user_id':'prod_users_unq'}, inplace=True)
prod_usr.reset_index(inplace=True)
prod_usr_reordered = orders_products.loc[orders_products.reordered, :].groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr_reordered.rename(columns={'user_id': 'prod_users_unq_reordered'}, inplace=True)
prod_usr_reordered.reset_index(inplace=True)
order_stat = orders_products.groupby('order_id').agg({'order_id': 'size'}) \
.rename(columns={'order_id': 'order_size'}).reset_index()
orders_products = pd.merge(orders_products, order_stat, on='order_id')
orders_products['add_to_cart_order_inverted'] = orders_products.order_size - orders_products.add_to_cart_order
orders_products['add_to_cart_order_relative'] = orders_products.add_to_cart_order / orders_products.order_size
data = orders_products.groupby(['user_id', 'product_id']).agg({'user_id': 'size',
'order_number': ['min', 'max'],
'add_to_cart_order': ['mean', 'median'],
'days_since_prior_order': ['mean', 'median'],
'order_dow': ['mean', 'median'],
'order_hour_of_day': ['mean', 'median'],
'add_to_cart_order_inverted': ['mean', 'median'],
'add_to_cart_order_relative': ['mean', 'median'],
'reordered': ['sum']})
data.columns = data.columns.droplevel(0)
data.columns = ['up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position', 'up_median_cart_position',
'days_since_prior_order_mean', 'days_since_prior_or
| codyparker/channels-obstruction | game/views/views.py | Python | mit | 3,489 | 0.002006 |
from django.contrib.auth.forms import PasswordResetForm
from django.shortcuts import redirect
from django.views.generic import CreateView, TemplateView, View, FormView
from django.contrib.auth import authenticate, login
from game.forms import *
from game.models import User, Game
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib import messages
import json
from django.contrib.auth import get_user
from django.shortcuts import get_object_or_404
class HomeView(TemplateView):
template_name = 'home.html'
def dispatch(self, request, *args, **kwargs):
# if logged in, send them to the lobby
if request.user.is_authenticated:
return redirect('/lobby/')
return super(HomeView, self).dispatch(request, *args, **kwargs)
class CreateUserView(CreateView):
template_name = 'register.html'
form_class = UserCreationForm
success_url = '/lobby/'
def form_valid(self, form):
valid = super(CreateUserView, self).form_valid(form)
username, password = form.cleaned_data.get('username'), form.cleaned_data.get('password1')
new_user = authenticate(username=username, password=password)
login(self.request, new_user)
return valid
class LobbyView(TemplateView):
template_name = 'components/lobby/lobby.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(LobbyView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(LobbyView, self).get_context_data(**kwargs)
# get current open games to prepopulate the list
# we're creating a list of games that contains just the id (for the link) and the creator
available_games = [{'creator': game.creator.username, 'id': game.pk} for game in Game.get_available_games()]
# for the player's games, we're returning a list of games with the opponent and id
player_games = Game.get_games_for_player(self.request.user)
return context
class GameView(TemplateView):
template_name = 'components/game/game.html'
game = None
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
# get the game by the id
self.game = Game.get_by_id(kwargs['game_id'])
user = get_user(request)
# check to see if the game is open and available for this user
# if this player is the creator, just return
if self.game.creator == user or self.game.opponent == user:
return super(GameView, self).dispatch(request, *args, **kwargs)
# if there is no opponent and the game is not yet completed,
# set the opponent as this user
if not self.game.opponent and not self.game.completed:
self.game.opponent = user
self.game.save()
return super(GameView, self).dispatch(request, *args, **kwargs)
else:
messages.add_message(request, messages.ERROR, 'Sorry, the selected game is not available.')
return redirect('/lobby/')
def get_context_data(self, **kwargs):
context = super(GameView, self).get_context_data(**kwargs)
context['game'] = self.game
return context
| CalthorpeAnalytics/urbanfootprint | footprint/client/configuration/scag_dm/base/jurisdiction_boundary.py | Python | gpl-3.0 | 1,720 | 0 |
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
from django.contrib.gis.db import models
from footprint.main.models.geospatial.feature import Feature
__author__ = 'calthorpe_analytics'
class JurisdictionBoundary(Feature):
city_uid = models.IntegerField(null=True)
city = models.CharField(max_length=50, null=True)
county = models.CharField(max_length=50, null=True)
county_id = models.IntegerField(null=True)
pop12 = models.DecimalField(max_digits=14, decimal_places=2)
pop20 = models.DecimalField(max_digits=14, decimal_places=2)
pop35 = models.DecimalField(max_digits=14, decimal_places=2)
pop40 = models.DecimalField(max_digits=14, decimal_places=2)
hh12 = models.DecimalField(max_digits=14, decimal_places=2)
hh20 = models.DecimalField(max_digits=14, decimal_places=2)
hh35 = models.DecimalField(max_digits=14, decimal_places=2)
hh40 = models.DecimalField(max_digits=14, decimal_places=2)
emp12 = models.DecimalField(max_digits=14, decimal_places=2)
emp20 = models.DecimalField(max_digits=14, decimal_places=2)
emp35 = models.DecimalField(max_digits=14, decimal_places=2)
emp40 = models.DecimalField(max_digits=14, decimal_places=2)
class Meta(object):
abstract = True
app_label = 'main'
| yarikoptic/fail2ban | fail2ban/server/filtersystemd.py | Python | gpl-2.0 | 8,738 | 0.029068 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__author__ = "Steven Hiscocks"
__copyright__ = "Copyright (c) 2013 Steven Hiscocks"
__license__ = "GPL"
import datetime
import time
from distutils.version import LooseVersion
from systemd import journal
if LooseVersion(getattr(journal, '__version__', "0")) < '204':
raise ImportError("Fail2Ban requires systemd >= 204")
from .failmanager import FailManagerEmpty
from .filter import JournalFilter, Filter
from .mytime import MyTime
from ..helpers import getLogger, logging, splitwords
# Gets the instance of the logger.
logSys = getLogger(__name__)
##
# Journal reader class.
#
# This class reads from systemd journal and detects login failures or anything
# else that matches a given regular expression. This class is instantiated by
# a Jail object.
class FilterSystemd(JournalFilter): # pragma: systemd no cover
##
# Constructor.
#
# Initialize the filter object with default values.
# @param jail the jail object
def __init__(self, jail, **kwargs):
jrnlargs = FilterSystemd._getJournalArgs(kwargs)
JournalFilter.__init__(self, jail, **kwargs)
self.__modified = 0
# Initialise systemd-journal connection
self.__journal = journal.Reader(**jrnlargs)
self.__matches = []
self.setDatePattern(None)
self.ticks = 0
logSys.debug("Created FilterSystemd")
@staticmethod
def _getJournalArgs(kwargs):
args = {'converters':{'__CURSOR': lambda x: x}}
try:
args['path'] = kwargs.pop('journalpath')
except KeyError:
pass
try:
args['files'] = kwargs.pop('journalfiles')
except KeyError:
pass
else:
import glob
p = args['files']
if not isinstance(p, (list, set, tuple)):
p = splitwords(p)
files = []
for p in p:
files.extend(glob.glob(p))
args['files'] = list(set(files))
try:
args['flags'] = kwargs.pop('journalflags')
except KeyError:
pass
return args
##
# Add journal match filters from a list structure
#
# @param matches list structure with journal matches
def _addJournalMatches(self, matches):
if self.__matches:
self.__journal.add_disjunction() # Add OR
newMatches = []
for match in matches:
newMatches.append([])
for match_element in match:
self.__journal.add_match(match_element)
newMatches[-1].append(match_element)
self.__journal.add_disjunction()
self.__matches.extend(newMatches)
##
# Add a journal match filter
#
# @param match journalctl syntax matches in list structure
def addJournalMatch(self, match):
newMatches = [[]]
for match_element in match:
if match_element == "+":
newMatches.append([])
else:
newMatches[-1].append(match_element)
try:
self._addJournalMatches(newMatches)
except ValueError:
logSys.error(
"Error adding journal match for: %r", " ".join(match))
self.resetJournalMatches()
raise
else:
logSys.info("Added journal match for: %r", " ".join(match))
##
# Reset a journal match filter called on removal or failure
#
# @return None
def resetJournalMatches(self):
self.__journal.flush_matches()
logSys.debug("Flushed all journal matches")
match_copy = self.__matches[:]
self.__matches = []
try:
self._addJournalMatches(match_copy)
except ValueError:
logSys.error("Error restoring journal matches")
raise
else:
logSys.debug("Journal matches restored")
##
# Delete a journal match filter
#
# @param match journalctl syntax matches
def delJournalMatch(self, match):
if match in self.__matches:
del self.__matches[self.__matches.index(match)]
self.resetJournalMatches()
else:
raise ValueError("Match not found")
logSys.info("Removed journal match for: %r" % " ".join(match))
##
# Get current journal match filter
#
# @return journalctl syntax matches
def getJournalMatch(self):
return self.__matches
def uni_decode(self, x):
v = Filter.uni_decode(x, self.getLogEncoding())
return v
##
# Get journal reader
#
# @return journal reader
def getJournalReader(self):
return self.__journal
##
# Format journal log entry into syslog style
#
# @param entry systemd journal entry dict
# @return format log line
def formatJournalEntry(self, logentry):
# Be sure, all argument of line tuple should have the same type:
uni_decode = self.uni_decode
logelements = []
v = logentry.get('_HOSTNAME')
if v:
logelements.append(uni_decode(v))
v = logentry.get('SYSLOG_IDENTIFIER')
if not v:
v = logentry.get('_COMM')
if v:
logelements.append(uni_decode(v))
v = logentry.get('SYSLOG_PID')
if not v:
v = logentry.get('_PID')
if v:
logelements[-1] += ("[%i]" % v)
logelements[-1] += ":"
if logelements[-1] == "kernel:":
if '_SOURCE_MONOTONIC_TIMESTAMP' in logentry:
monotonic = logentry.get('_SOURCE_MONOTONIC_TIMESTAMP')
else:
monotonic = logentry.get('__MONOTONIC_TIMESTAMP')[0]
logelements.append("[%12.6f]" % monotonic.total_seconds())
msg = logentry.get('MESSAGE','')
if isinstance(msg, list):
logelements.append(" ".join(uni_decode(v) for v in msg))
else:
logelements.append(uni_decode(msg))
logline = " ".join(logelements)
date = logentry.get('_SOURCE_REALTIME_TIMESTAMP',
logentry.get('__REALTIME_TIMESTAMP'))
logSys.debug("Read systemd journal entry: %r" %
"".join([date.isoformat(), logline]))
## use the same type for 1st argument:
return ((logline[:0], date.isoformat(), logline),
time.mktime(date.timetuple()) + date.microsecond/1.0E6)
def seekToTime(self, date):
if not isinstance(date, datetime.datetime):
date = datetime.datetime.fromtimestamp(date)
self.__journal.seek_realtime(date)
##
# Main loop.
#
# Periodically check for new journal entries matching the filter and
# hand them over to FailManager
def run(self):
if not self.getJournalMatch():
logSys.notice(
"Jail started without 'journalmatch' set. "
"Jail regexs will be checked against all journal entries, "
"which is not advised for performance reasons.")
# Seek to now - findtime in journal
start_time = datetime.datetime.now() - \
datetime.timedelta(seconds=int(self.getFindTime()))
self.seekToTime(start_time)
# Move back one entry to ensure do not end up in dead space
# if start time beyond end of journal
try:
self.__journal.get_previous()
except OSError:
pass # Reading failure, so safe to ignore
while self.active:
# wait for records (or for timeout in sleeptime seconds):
self.__journal.wait(self.sleeptime)
if self.idle:
# because journal.wait returns immediately if there are records in the journal,
# just wait a little bit here while idle, to prevent high load:
time.sleep(self.sleeptime)
continue
self.__modified = 0
while self.active:
logentry = None
try:
logentry = self.__journal.get_next()
except OSError as e:
logSys.error("Error reading line from systemd journal: %s",
e, exc_info=logSys.getEffectiveLevel() <= logging.DEBUG)
self.ticks += 1
if logentry:
self.processLineAndAdd(
*self.formatJournalEntry(logentry))
self.__modified += 1
if self.__modified >= 100: # todo: should be configurable
break
else:
break
if self.__modified:
try:
while True:
ticket = self.failManager.toBan()
self.jail.putFailTicket(ticket)
except FailManagerEmpty:
self.failManager.cleanup(MyTime.time()
| andrewruba/YangLab | JPC simulations 2019/Figure 7 - symmetry compression/simulation.py | Python | gpl-3.0 | 13,921 | 0.037354 |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 10:52:38 2017
@author: Andrew Ruba
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import random
import csv
import os
from numpy.random import choice
import numpy as np
from scipy.optimize import curve_fit
import time
import matplotlib.pyplot as plt
from scipy import stats
## below arrays are for saving simulation data for statistical analysis
global gausslist
gausslist = []
global bimodallist
bimodallist = []
global bimodalmean
bimodalmean = []
global bimodalsd
bimodalsd = []
global bimodalheight
bimodalheight = []
global bimodalauc
bimodalauc = []
def sim(gui, PTNUM, RADIUS, PREC, ITER, BINSIZE, PERCERROR, A, B, ROTATION):
def simulation(num_points, radius, dr, ss, mm, aa, bb, rotation):
def area_fn(X):
X = float(X)
A = -(dr**2)*np.pi
B = dr*2*np.pi
return X*B+A
def gauss_fn(x, s, m):
a = area_fn(m)
x = float(x)
s = float(s)
m = float(m)
return a*np.e**(-(x-m)**2.0/(2.0*s**2.0))
def combine(x):
s = ss
m = mm
return (area_fn(x) * gauss_fn(x, s, m))
##starting with perfect x,y and adding error
xydata = []
mm = mm + 0.00001
while len(xydata) < num_points:
theta = np.random.random()*360.0
## precision distribution sampling
# ss = choice([3,5,7,9], p=[0.1475,0.2775,0.3075,0.2675])
# ss = choice([4.5,5.5,6.5,7.5,8.5,9.5], p=[0.02,0.05,0.07,0.11,0.2,0.55])
y_prec = np.random.normal(0.0, ss)
z_prec = np.random.normal(0.0, ss)
yy = aa*np.cos(theta)
zz = bb*np.sin(theta)
yyy = yy*np.cos(np.radians(rotation))+zz*np.sin(np.radians(rotation))
zzz = -yy*np.sin(np.radians(rotation))+zz*np.cos(np.radians(rotation))
xydata.append((yyy+y_prec, zzz+z_prec))
def gen_matrix(r, d_r):
##'be' is short for bin edges
if r%d_r > 0:
be = range(0, r+r%d_r, d_r)
else:
be = range(0, r+d_r, d_r)
matrix = []
for i in range(len(be)-1):
matrix.append([])
x = 0
for i in range(len(matrix)):
for j in range(x):
matrix[i].append(0)
x += 1
##generate areas of sections closest to x axis
for i in range(len(matrix)):
theta = np.arccos(float(be[len(be)-2-i])/float(be[len(be)-1-i]))
arc_area = (theta/(2*np.pi)) * np.pi * float(be[len(be)-1-i])**2
tri_area = 0.5 * float(be[len(be)-2-i]) * (np.sin(theta) * float(be[len(be)-1-i]))
matrix[i].append(4 * (arc_area - tri_area))
##skipping factor
x = 2
##generate areas of layers going further out from x axis
while len(matrix[0]) < len(matrix):
for i in range(len(matrix) - len(matrix[0])):
num = 0
for j in range(len(matrix)):
for k in range(len(matrix[i]) + 1):
if j == i and k < len(matrix[i]):
num += matrix[j][k]
elif j > i:
num += matrix[j][k]
theta = np.arccos(float(be[len(be)-1-x-i])/float(be[len(be)-1-i]))
arc_area = (theta/(2*np.pi)) * np.pi * float(be[len(be)-1-i])**2
tri_area = 0.5 * float(be[len(be)-1-x-i]) * (np.sin(theta) * float(be[len(be)-1-i]))
matrix[i].append(4 * (arc_area - tri_area) - num)
x += 1
return matrix
def smoothdata(data, r, d_r):
"""Smooths data with a 3-point moving window and takes the absolute value average."""
smooth_data = []
r += 1
##comment out for smoothing
smooth_data = []
for i in range(len(data)):
smooth_data.append(data[i])
##adds + and - bins
final_smooth_data = []
for i in range(int(r/d_r)):
final_smooth_data.append(smooth_data[i] + smooth_data[len(smooth_data)-1-i])
return list(reversed(final_smooth_data))
def deconvolution(hv, be, r, d_r):
"""hv = hist_values, be = bin_edges"""
density = []
matrix = gen_matrix(r, d_r)
while len(hv) > len(matrix):
hv.pop()
while len(matrix) > len(hv):
matrix.pop()
rev_hv = list(reversed(hv))
x = 0
for i in range(len(rev_hv)):
##calculate how much to subtract from bin
density_sub = 0
y = 0
for j in range(x):
density_sub += density[y] * matrix[j][i]
y += 1
##calculate final bin value
density.append((rev_hv[i] - density_sub) / matrix[i][i])
x += 1
unrev_hv = list(reversed(density))
smooth_data = []
for i in range(len(unrev_hv)):
if i == 0 or i == (len(unrev_hv) - 1):
smooth_data.append(unrev_hv[i])
else:
smooth_data.append(np.average([unrev_hv[i-1], unrev_hv[i], unrev_hv[i+1]]))
return unrev_hv, smooth_data, hv
def make_hist(data, r, d_r):
hist_values, bin_edges = np.histogram(data, bins = 2 * int(r/d_r), range = (-r, r))
new_bin_edges = []
for i in bin_edges:
if i >= 0:
new_bin_edges.append(i)
new_hist_values = smoothdata(hist_values, r, d_r)
return new_hist_values, new_bin_edges
def csv_read(path):
with open(path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
holdlist = []
for row in reader:
holdlist.append(float(row[1]))
return holdlist
jkl = []
for y,z in xydata:
jkl.append(y)
radius = int(np.floor(radius/dr))*dr
if num_points == PTNUM + 1:
## decide the proper bin size
minbinsize = 2
binsizes = []
binsizesdata = [[] for variable in range(1, int(PREC)+1)]
gui.message.set('0% done calculating ideal bin size...')
gui.update()
for binoptimization in range(10):
for binsize in range(1, int(PREC)+1):
if binsize >= minbinsize:
error = 0
# print ('binsize ' + str(binsize))
jkl = []
mm = mm + 0.00001
while len(jkl) < num_points-1:
theta = np.random.random()*360.0
## precision distribution sampling
# ss = choice([3,5,7,9], p=[0.1475,0.2775,0.3075,0.2675])
# ss = choice([4.5,5.5,6.5,7.5,8.5,9.5], p=[0.02,0.05,0.07,0.11,0.2,0.55])
y_prec = np.random.normal(0.0, ss)
jkl.append(mm*np.cos(theta)+y_prec)
a,b = make_hist(jkl, radius, binsize)
final_unsmooth, final_smooth, final_2d = deconvolution(a, b, radius, binsize)
holdlist = []
addZero = False
for val in list(reversed(final_unsmooth)):
if not addZero:
if val >= 0.0:
holdlist.append(val)
else:
addZero = True
holdlist.append(0.0)
else:
holdlist.append(0.0)
final_unsmooth = list(reversed(holdlist))
##rescale ideal data
matrix = gen_matrix(radius, binsize)
newmatrix = []
for i in matrix:
newmatrix.append(list(reversed(i)))
matrix = list(reversed(newmatrix))
# print (a)
# print (final_unsmooth)
while len(a) > len(matrix):
a.pop()
while len(matrix) > len(a):
matrix.pop()
for ncol in range(len(matrix[0])):
binsub = 0.0
for mcol in range(len(matrix)):
binsub += float(matrix[mcol][ncol]*final_unsmooth[mcol])
try:
if a[ncol] != 0.0:
# print (binsub)
error += np.square(a[ncol] - binsub) / a[ncol]
except:
pass
popped = a.pop()
while popped == 0:
popped = a.pop()
binsizesdata[binsize-1].append((error, len(a)+1,1-stats.chi2.cdf(error, len(a)+1),binsize))
else:
binsizesdata[binsize-1].appe
| rutsky/letsencrypt | letsencrypt/validator.py | Python | apache-2.0 | 2,825 | 0.000354 |
"""Validators to determine the current webserver configuration"""
import logging
import socket
import requests
import zope.interface
from acme import crypto_util
from acme import errors as acme_errors
from letsencrypt import interfaces
logger = logging.getLogger(__name__)
class Validator(object):
# pylint: disable=no-self-use
"""Collection of functions to test a live webserver's configuration"""
zope.interface.implements(interfaces.IValidator)
def certificate(self, cert, name, alt_host=None, port=443):
"""Verifies the certificate presented at name is cert"""
host = alt_host if alt_host else socket.gethostbyname(name)
try:
presented_cert = crypto_util.probe_sni(name, host, port)
except acme_errors.Error as error:
logger.exception(error)
return False
return presented_cert.digest("sha256") == cert.digest("sha256")
def redirect(self, name, port=80, headers=None):
"""Test whether webserver redirects to secure connection."""
url = "http://{0}:{1}".format(name, port)
if headers:
response = requests.get(url, headers=headers, allow_redirects=False)
else:
response = requests.get(url, allow_redirects=False)
if response.status_code not in (301, 303):
return False
redirect_location = response.headers.get("location", "")
if not redirect_location.startswith("https://"):
return False
if response.status_code != 301:
logger.error("Server did not redirect with permanent code")
return False
return True
def hsts(self, name):
"""Test for HTTP Strict Transport Security header"""
headers = requests.get("https://" + name).headers
hsts_header = headers.get("strict-transport-security")
if not hsts_header:
return False
# Split directives following RFC6797, section 6.1
directives = [d.split("=") for d in hsts_header.split(";")]
max_age = [d for d in directives if d[0] == "max-age"]
if not max_age:
logger.error("Server responded with invalid HSTS header field")
return False
try:
_, max_age_value = max_age[0]
max_age_value = int(max_age_value)
except ValueError:
logger.error("Server responded with invalid HSTS header field")
return False
# Test whether HSTS does not expire for at least two weeks.
if max_age_value <= (2 * 7 * 24 * 3600):
logger.error("HSTS should not expire in less than two weeks")
return False
return True
def ocsp_stapling(self, name):
"""Verify ocsp stapling for domain."""
raise NotImplementedError()
| ccqpein/Arithmetic-Exercises | Depth-First-Search/DFS.py | Python | apache-2.0 | 3,928 | 0.000255 |
class Graph():
pointSet = []
# it should be a dict of {point: [list of points it connects to]}
pathDic = dict()
pointTime = dict() # points' discover time and finish time
def __init__(self, points, paths):
if set(paths.keys()) - set(points):
print(
"Warning: some points in the path dict do not exist in the point set,\
initialization failed!")
return
self.pointSet = points
self.pointSet.sort() # sort points to alphabetical
self.pathDic = paths
# init each points discovery and finishing time list
self.pointTime = {key: [] for key in self.pointSet}
for i in self.pointSet:
try:
self.pathDic[i].sort() # sort paths list to alphabetical
except KeyError: # if some point has no path, give it a empty list
self.pathDic[i] = []
def DFS(g):
time = 0
greyMark = set() # grey set for storing all points in recursive
blackMark = set() # black set for storing all points have done
def DFS_inner(g, i, time): # recursive function
time += 1
greyMark.add(i) # add to grey set
g.pointTime[i].append(time) # store discover time
for c in g.pathDic[i]:
if c in blackMark or c in greyMark:
pass
else:
time = DFS_inner(g, c, time)
time += 1
# store finish time, so finish time's index is 1
g.pointTime[i].append(time)
blackMark.add(i) # finish
greyMark.remove(i)  # delete grey set
return time
for i in g.pointSet:
if i in blackMark or i in greyMark:
pass
else:
time = DFS_inner(g, i, time)
# format print
for k in g.pointSet:
print("{0} -> discover time is {1} -> finish time is {2}"
.format(k, g.pointTime[k][0], g.pointTime[k][1]))
return "done"
def topologicalSort(g):
DFS(g) # in case graph has not DFSed before
# create a list of tuples: [(point, finish time)]
finishTimeList = []
for k, v in g.pointTime.items():
finishTimeList.append((k, v[1])) # v[1] is finish time
# sort elements increasing by finish time
finishTimeList.sort(key=lambda pair: pair[1])
# insert on the front of result list
result = []
for i in finishTimeList:
result.insert(0, i[0])
# format print
reStr = result[0]
for i in result[1:]:
reStr += " -> " + i
print(reStr)
return "done"
if __name__ == "__main__":
# test
test1set = ["u", "v", "w", "x", "y", "z", ]
test1path = {"u": ["x", "v"],
"v": ["y"],
"w": ["y", "z"],
"x": ["v"],
"y": ["x"],
"z": ["z"],
}
# q1 data
q1set = ["s", "v", "w", "q", "t", "x", "z", "y", "r", "u", ]
q1path = {"s": ["v"],
"v": ["w"],
"w": ["s"],
"q": ["s", "w", "t"],
"t": ["x", "y"],
"x": ["z"],
"z": ["x"],
"y": ["q"],
"r": ["u", "y"],
"u": ["y"],
}
# q2 data
q2set = ["m", "n", "q", "o", "p", "r", "s",
"t", "u", "v", "w", "x", "y", "z", ]
q2path = {"m": ["x", "q", "r", ],
"n": ["o", "q", "u", ],
"q": ["t", ],
"o": ["r", "s", "v", ],
"p": ["o", "s", "z", ],
"r": ["u", "y", ],
"s": ["r", ],
"t": [],
"u": ["t", ],
"v": ["x", "w", ],
"w": ["z", ],
"x": [],
"y": ["v", ],
"z": [],
}
# test1 = Graph(test1set, test1path)
q1 = Graph(q1set, q1path)
q2 = Graph(q2set, q2path)
DFS(q1)
print("\n")
topologicalSort(q2)
print("\n")
| gavincyi/Telex | src/message.py | Python | apache-2.0 | 2,393 | 0.008776 |
#!/bin/python
import datetime
class message():
def __init__(self, msg_id=0, channel_id=0, source_id=0, source_chat_id='',\
msg=''):
curr_datetime = datetime.datetime.now()
self.date = curr_datetime.strftime("%Y%m%d")
self.time = curr_datetime.strftime("%H:%M:%S.%f %z")
self.msg_id = msg_id
self.channel_id = channel_id
self.source_id = source_id
self.source_chat_id = source_chat_id
self.msg = msg
def str(self):
return "'%s','%s',%d,%d,%d,'%s','%s'" \
% (self.date, \
self.time, \
self.msg_id, \
self.channel_id, \
self.source_id, \
self.source_chat_id, \
self.msg)
@staticmethod
def from_message_record(record, set_curr_time = True):
"""
Convert a db record to a message record
:param record: Database record
:param set_curr_time: Indicate if current date and time is set
"""
if not record:
ret = message()
else:
ret = message(msg_id=record[message.msg_id_index()],
channel_id=record[message.channel_id_index()],
source_id=record[message.source_id_index()],
source_chat_id=record[message.source_chat_id_index()],
msg=record[message.msg_index()])
if not set_curr_time:
ret.date = record[message.date_index()]
ret.time = record[message.time_index()]
return ret
@staticmethod
def field_str():
return "date text, time text, msgid int, channelid int, " + \
"sourceid int, sourcechatid text, msg text"
@staticmethod
def key_str():
return "msgid"
@staticmethod
def date_index():
return 0
@staticmethod
def time_index():
return 1
@staticmethod
def msg_id_index():
return 2
@staticmethod
def channel_id_index():
return 3
@staticmethod
def source_id_index():
return 4
@staticmethod
def source_chat_id_index():
return 5
@staticmethod
def msg_index():
return 6
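Since `from_message_record` reads positional columns in the order fixed by the `*_index` helpers above (date, time, msgid, channelid, sourceid, sourcechatid, msg), a quick usage sketch follows; the import path and the sample tuple are assumptions for illustration.

```python
# Hedged usage sketch for message.from_message_record; the record tuple
# follows the column order defined by the *_index helpers above.
from message import message  # assumes src/ is on sys.path

record = ("20240101", "12:00:00.000000 ", 42, 1, 7, "chat-123", "hello")
msg = message.from_message_record(record, set_curr_time=False)
print(msg.str())  # -> '20240101','12:00:00.000000 ',42,1,7,'chat-123','hello'
```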
| Nesiehr/osf.io | api_tests/nodes/views/test_node_contributors_list.py | Python | apache-2.0 | 95,975 | 0.003813 |
# -*- coding: utf-8 -*-
import random
import mock
from datetime import datetime
from nose.tools import * # flake8: noqa
from rest_framework import exceptions
from api.base.exceptions import Conflict
from api.base.settings.defaults import API_BASE
from api.nodes.serializers import NodeContributorsCreateSerializer
from framework.auth.core import Auth
from tests.base import ApiTestCase, capture_signals, fake
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
UserFactory
)
from tests.utils import assert_logs
from website.models import NodeLog
from website.project.signals import contributor_added, unreg_contributor_added, contributor_removed
from website.util import permissions, disconnected_from_listeners
class NodeCRUDTestCase(ApiTestCase):
def setUp(self):
super(NodeCRUDTestCase, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.title = 'Cool Project'
self.new_title = 'Super Cool Project'
self.description = 'A Properly Cool Project'
self.new_description = 'An even cooler project'
self.category = 'data'
self.new_category = 'project'
self.public_project = ProjectFactory(title=self.title,
description=self.description,
category=self.category,
is_public=True,
creator=self.user)
self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
self.private_project = ProjectFactory(title=self.title,
description=self.description,
category=self.category,
is_public=False,
creator=self.user)
self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id)
self.fake_url = '/{}nodes/{}/'.format(API_BASE, '12345')
def make_node_payload(node, attributes):
return {
'data': {
'id': node._id,
'type': 'nodes',
'attributes': attributes,
}
}
def make_contrib_id(node_id, user_id):
return '{}-{}'.format(node_id, user_id)
class TestNodeContributorList(NodeCRUDTestCase):
def setUp(self):
super(TestNodeContributorList, self).setUp()
self.private_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.private_project._id)
self.public_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.public_project._id)
def test_concatenated_id(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data'][0]['id'].split('-')[0], self.public_project._id)
assert_equal(res.json['data'][0]['id'].split('-')[1], self.user._id)
def test_permissions_work_with_many_users(self):
users = {
'admin': [self.user._id],
'write': [],
'read': []
}
for i in range(0, 25):
perm = random.choice(users.keys())
perms = []
if perm == 'admin':
perms = ['read', 'write', 'admin', ]
elif perm == 'write':
perms = ['read', 'write', ]
elif perm == 'read':
perms = ['read', ]
user = AuthUserFactory()
self.private_project.add_contributor(user, permissions=perms)
users[perm].append(user._id)
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
for user in data:
api_perm = user['attributes']['permission']
user_id = user['id'].split('-')[1]
assert user_id in users[api_perm], 'Permissions incorrect for {}. Should not have {} permission.'.format(user_id, api_perm)
def test_return_public_contributor_list_logged_out(self):
self.public_project.add_contributor(self.user_two, save=True)
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][0]['id'], make_contrib_id(self.public_project._id, self.user._id))
assert_equal(res.json['data'][1]['id'], make_contrib_id(self.public_project._id, self.user_two._id))
def test_return_public_contributor_list_logged_in(self):
res = self.app.get(self.public_url, auth=self.user_two.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['id'], make_contrib_id(self.public_project._id, self.user._id))
def test_return_private_contributor_list_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert 'detail' in res.json['errors'][0]
def test_return_private_contributor_list_logged_in_contributor(self):
self.private_project.add_contributor(self.user_two)
self.private_project.save()
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][0]['id'], make_contrib_id(self.private_project._id, self.user._id))
assert_equal(res.json['data'][1]['id'], make_contrib_id(self.private_project._id, self.user_two._id))
def test_return_private_contributor_list_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert 'detail' in res.json['errors'][0]
def test_filtering_on_obsolete_fields(self):
# regression test for changes in filter fields
url_fullname = '{}?filter[fullname]=foo'.format(self.public_url)
res = self.app.get(url_fullname, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], u"'fullname' is not a valid field for this endpoint.")
# middle_name is now middle_names
url_middle_name = '{}?filter[middle_name]=foo'.format(self.public_url)
res = self.app.get(url_middle_name, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], "'middle_name' is not a valid field for this endpoint.")
def test_disabled_contributors_contain_names_under_meta(self):
self.public_project.add_contributor(self.user_two, save=True)
self.user_two.is_disabled = True
self.user_two.save()
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][0]['id'], make_contrib_id(self.public_project._id, self.user._id))
assert_equal(res.json['data'][1]['id'], make_contrib_id(self.public_project._id, self.user_two._id))
assert_equal(res.json['data'][1]['embeds']['users']['errors'][0]['meta']['full_name'], self.user_two.fullname)
assert_equal(res.json['data'][1]['embeds']['users']['errors'][0]['detail'], 'The requested user is no longer available.')
def test_total_bibliographic_contributor_count_returned_in_metadata(self):
non_bibliographic_user = UserFactory()
self.public_project.add_contributor(non_bibliographic_user, visible=False, auth=Auth(self.public_project.creator))
self.public_project.save()
res = self.app.get(self.public_url, auth=self.user_two.auth)
assert_equal(res.
| jnewland/home-assistant | homeassistant/components/xs1/switch.py | Python | apache-2.0 | 1,288 | 0 |
"""Support for XS1 switches."""
import logging
from homeassistant.helpers.entity import ToggleEntity
from . import ACTUATORS, DOMAIN as COMPONENT_DOMAIN, XS1DeviceEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the XS1 switch platform."""
from xs1_api_client.api_constants import ActuatorType
actuators = hass.data[COMPONENT_DOMAIN][ACTUATORS]
switch_entities = []
for actuator in actuators:
if (actuator.type() == ActuatorType.SWITCH) or \
(actuator.type() == ActuatorType.DIMMER):
switch_entities.append(XS1SwitchEntity(actuator))
async_add_entities(switch_entities)
class XS1SwitchEntity(XS1DeviceEntity, ToggleEntity):
"""Representation of a XS1 switch actuator."""
@property
def name(self):
"""Return the name of the device if any."""
return self.device.name()
@property
def is_on(self):
"""Return true if switch is on."""
return self.device.value() == 100
def turn_on(self, **kwargs):
"""Turn the device on."""
self.device.turn_on()
def turn_off(self, **kwargs):
"""Turn the device off."""
self.device.turn_off()
| andrew749/andrew749.github.io | application/caches/test/test_local_cache.py | Python | apache-2.0 | 367 | 0.029973 |
import unittest
from application.caches.local_cache import LocalCache
class LocalCacheTest(unittest.TestCase):
def setUp(self):
self.cache = LocalCache()
def tearDown(self):
pass
def test_add_element(self):
self.cache.add("test.key", "test.value")
self.assertEqual(self.cache.get("test.key"), "test.value")
if __name__ == "__main__":
unittest.main()
| perey/pegl | src/pegl/ext/khr_vgimage.py | Python | gpl-3.0 | 983 | 0 |
#!/usr/bin/env python3
'''Khronos OpenVG parent image binding for EGL.
http://www.khronos.org/registry/egl/extensions/KHR/EGL_KHR_vg_parent_image.txt
'''
# Copyright © 2012-13 Tim Pederick.
#
# This file is part of Pegl.
#
# Pegl is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pegl is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pegl. If not, see <http://www.gnu.org/licenses/>.
# Local imports.
from .khr_image import Image
# Extension image target type.
Image.extend('EGL_KHR_vg_parent_image', {'VG_PARENT_IMAGE': 0x30BA})
| pexip/os-kombu | docs/conf.py | Python | bsd-3-clause | 924 | 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from sphinx_celery import conf
globals().update(conf.build_config(
'kombu', __file__,
project='Kombu',
version_dev='4.3',
version_stable='4.2',
canonical_url='http://docs.kombu.me',
webdomain='kombu.me',
github_project='celery/kombu',
author='Ask Solem & contributors',
author_name='Ask Solem',
copyright='2009-2016',
publisher='Celery Project',
html_logo='images/kombusmall.jpg',
html_favicon='images/favicon.ico',
html_prepend_sidebars=['sidebardonations.html'],
extra_extensions=['sphinx.ext.napoleon'],
apicheck_ignore_modules=[
'kombu.entity',
'kombu.messaging',
'kombu.asynchronous.aws.ext',
'kombu.asynchronous.aws.sqs.ext',
'kombu.transport.qpid_patches',
'kombu.utils',
'kombu.transport.virtual.base',
],
))
| AndreasMadsen/bachelor-code | visualizer/__init__.py | Python | mit | 71 | 0 |
from graph.graph_server import GraphServer
__all__ = ['GraphServer']
| taoliu/MACS | MACS2/bdgbroadcall_cmd.py | Python | bsd-3-clause | 2,141 | 0.01121 |
# Time-stamp: <2019-09-25 10:04:48 taoliu>
"""Description: Fine-tuning script to call broad peaks from a single
bedGraph track for scores.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
# ------------------------------------
# python modules
# ------------------------------------
import sys
import os
import logging
from MACS2.IO import BedGraphIO
# ------------------------------------
# constants
# ------------------------------------
logging.basicConfig(level=20,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
# ------------------------------------
# Misc functions
# ------------------------------------
error = logging.critical # function alias
warn = logging.warning
debug = logging.debug
info = logging.info
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def run( options ):
info("Read and build bedGraph...")
bio = BedGraphIO.bedGraphIO(options.ifile)
btrack = bio.build_bdgtrack(baseline_value=0)
info("Call peaks from bedGraph...")
bpeaks = btrack.call_broadpeaks (lvl1_cutoff=options.cutoffpeak, lvl2_cutoff=options.cutofflink, min_length=options.minlen, lvl1_max_gap=options.lvl1maxgap, lvl2_max_gap=options.lvl2maxgap)
info("Write peaks...")
if options.ofile:
bf = open( os.path.join( options.outdir, options.ofile ), "w" )
options.oprefix = options.ofile
else:
bf = open ( os.path.join( options.outdir, "%s_c%.1f_C%.2f_l%d_g%d_G%d_broad.bed12" % (options.oprefix,options.cutoffpeak,options.cutofflink,options.minlen,options.lvl1maxgap,options.lvl2maxgap)), "w" )
bpeaks.write_to_gappedPeak(bf, name_prefix=(options.oprefix+"_broadRegion").encode(), score_column="score", trackline=options.trackline)
info("Done")
| csvtools/csvtools | src/widgets/importwiz.py | Python | gpl-3.0 | 3,525 | 0.003972 |
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import lib.images_rc
import sys
class QImportWiz(QDialog):
#
# private
#
def _getCheckableList(self, sheets):
"""return an instance model with a sheet list composed of
three columns (sheet, number of rows, number of columns)
"""
model = QStandardItemModel(len(sheets), 3)
model.setHeaderData(0, Qt.Horizontal, self.tr('Sheet'))
model.setHeaderData(1, Qt.Horizontal, self.tr('Rows'))
model.setHeaderData(2, Qt.Horizontal, self.tr('Columns'))
for index, value in enumerate(sheets):
# get data
key = value[1]
numRows = value[2]
numColumns = value[3]
rowEnabled = numRows*numColumns>0
# key column
item = QStandardItem(key)
if len(sheets) == 1:
check = Qt.Checked
else:
check = Qt.Unchecked
item.setCheckState(check)
item.setCheckable(True)
item.setEditable (False)
item.setSelectable (False)
item.setEnabled(rowEnabled)
item.setData(key)
model.setItem (index, 0, item)
# num rows column
item =QStandardItem(str(numRows))
item.setEditable (False)
item.setSelectable (False)
item.setEnabled(rowEnabled)
item.setData(key)
model.setItem (index, 1, item)
# num columns column
item =QStandardItem(str(numColumns))
item.setEditable (False)
item.setSelectable (False)
item.setEnabled(rowEnabled)
item.setData(key)
model.setItem(index, 2, item)
return model
def _viewClicked(self):
sheets = self.sheets()
self.acceptButton.setEnabled(bool(sheets))
#
# public
#
def sheets(self):
"""returns key list of selected sheets"""
selects = []
for index in range(self.model.rowCount()):
item = self.model.item(index)
if item.checkState() == Qt.Checked:
key = item.data().toString()
selects.append(str(key))
return selects
#
# init
#
def __init__(self, sheets, *args):
QDialog.__init__ (self, *args)
self.acceptButton = QPushButton(self.tr('Accept'), self)
self.acceptButton.setIcon(QIcon(':images/accept.png'))
self.cancelButton = QPushButton(self.tr('Cancel'), self)
self.cancelButton.setIcon(QIcon(':images/cancel.png'))
buttonBox = QDialogButtonBox()
buttonBox.addButton(self.acceptButton, QDialogButtonBox.AcceptRole)
buttonBox.addButton(self.cancelButton, QDialogButtonBox.RejectRole)
buttonBox.accepted.connect(lambda: self.accept())
buttonBox.rejected.connect(lambda: self.reject())
self.model = self._getCheckableList(sheets)
view = QTreeView()
view.setRootIsDecorated(False)
view.setModel(self.model)
view.resizeColumnToContents(0)
view.resizeColumnToContents(1)
view.resizeColumnToContents(2)
view.clicked.connect(self._viewClicked)
self._viewClicked()
vbox = QVBoxLayout()
vbox.addWidget(view)
vbox.addWidget(buttonBox)
self.setLayout(vbox)
self.setWindowTitle(self.tr('Import Excel'))
self.setMinimumSize(300, 250)
self.resize(300, 250)
| genome21/dcos-cli | dcos/jsonitem.py | Python | apache-2.0 | 7,404 | 0.00027 |
import collections
import json
import re
from dcos import util
from dcos.errors import DCOSException
logger = util.get_logger(__name__)
def parse_json_item(json_item, schema):
"""Parse the json item based on a schema.
:param json_item: A JSON item in the form 'key=value'
:type json_item: str
:param schema: The JSON schema to use for parsing
:type schema: dict
:returns: A tuple for the parsed JSON item
:rtype: (str, any) where any is one of str, int, float, bool, list or dict
"""
terms = json_item.split('=', 1)
if len(terms) != 2:
raise DCOSException('{!r} is not a valid json-item'.format(json_item))
# Check that it is a valid key in our jsonschema
key = terms[0]
value = parse_json_value(key, terms[1], schema)
return (json.dumps(key), value)
def parse_json_value(key, value, schema):
"""Parse the json value based on a schema.
:param key: the key property
:type key: str
:param value: the value of property
:type value: str
:param schema: The JSON schema to use for parsing
:type schema: dict
:returns: parsed value
:rtype: str | int | float | bool | list | dict
"""
value_type = find_parser(key, schema)
return value_type(value)
def find_parser(key, schema):
"""
:param key: JSON field
:type key: str
:param schema: The JSON schema to use
:type schema: dict
:returns: A callable capable of parsing a string to its type
:rtype: ValueTypeParser
"""
key_schema = schema['properties'].get(key)
if key_schema is None:
keys = ', '.join(schema['properties'].keys())
raise DCOSException(
'Error: {!r} is not a valid property. '
'Possible properties are: {}'.format(key, keys))
else:
return ValueTypeParser(key_schema)
class ValueTypeParser(object):
"""Callable for parsing a string against a known JSON type.
:param schema: The JSON type as a schema
:type schema: dict
"""
def __init__(self, schema):
self.schema = schema
def __call__(self, value):
"""
:param value: String to try and parse
:type value: str
:returns: The parse value
:rtype: str | int | float | bool | list | dict
"""
value = clean_value(value)
if self.schema['type'] == 'string':
if self.schema.get('format') == 'uri':
return _parse_url(value)
else:
return _parse_string(value)
elif self.schema['type'] == 'object':
return _parse_object(value)
elif self.schema['type'] == 'number':
return _parse_number(value)
elif self.schema['type'] == 'integer':
return _parse_integer(value)
elif self.schema['type'] == 'boolean':
return _parse_boolean(value)
elif self.schema['type'] == 'array':
return _parse_array(value)
else:
            raise DCOSException('Unknown type {!r}'.format(self.schema['type']))
def clean_value(value):
"""
:param value: String to try and clean
:type value: str
:returns: The cleaned string
:rtype: str
"""
if len(value) > 1 and value.startswith('"') and value.endswith('"'):
return value[1:-1]
elif len(value) > 1 and value.startswith("'") and value.endswith("'"):
return value[1:-1]
else:
return value
def _parse_string(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: str
"""
return None if value == 'null' else value
def _parse_object(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: dict
"""
try:
json_object = json.loads(value)
if json_object is None or isinstance(json_object, collections.Mapping):
return json_object
else:
raise DCOSException(
'Unable to parse {!r} as a JSON object'.format(value))
except ValueError as error:
logger.exception('Error parsing value as a JSON object')
msg = 'Unable to parse {!r} as a JSON object: {}'.format(value, error)
raise DCOSException(msg)
def _parse_number(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: float
"""
try:
return None if value == 'null' else float(value)
except ValueError as error:
logger.exception('Error parsing value as a JSON number')
msg = 'Unable to parse {!r} as a float: {}'.format(value, error)
raise DCOSException(msg)
def _parse_integer(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: int
"""
try:
return None if value == 'null' else int(value)
except ValueError as error:
logger.exception('Error parsing value as a JSON integer')
msg = 'Unable to parse {!r} as an int: {}'.format(value, error)
raise DCOSException(msg)
def _parse_boolean(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: bool
"""
try:
boolean = json.loads(value)
if boolean is None or isinstance(boolean, bool):
return boolean
else:
raise DCOSException(
'Unable to parse {!r} as a boolean'.format(value))
except ValueError as error:
logger.exception('Error parsing value as a JSON boolean')
msg = 'Unable to parse {!r} as a boolean: {}'.format(value, error)
raise DCOSException(msg)
def _parse_array(value):
"""
    :param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: list
"""
try:
array = json.loads(value)
if array is None or isinstance(array, collections.Sequence):
return array
else:
raise DCOSException(
'Unable to parse {!r} as an array'.format(value))
except ValueError as error:
logger.exception('Error parsing value as a JSON array')
msg = 'Unable to parse {!r} as an array: {}'.format(value, error)
raise DCOSException(msg)
def _parse_url(value):
"""
:param value: The url to parse
:type url: str
:returns: The parsed value
:rtype: str
"""
scheme_pattern = r'^(?P<scheme>(?:(?:https?)://))'
domain_pattern = (
r'(?P<hostname>(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.?)+'
'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)?|') # domain,
value_regex = re.match(
scheme_pattern + # http:// or https://
r'(([^:])+(:[^:]+)?@){0,1}' + # auth credentials
domain_pattern +
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}))' # or ip
r'(?P<port>(?::\d+))?' # port
r'(?P<path>(?:/?|[/?]\S+))$', # resource path
value, re.IGNORECASE)
if value_regex is None:
scheme_match = re.match(scheme_pattern, value, re.IGNORECASE)
if scheme_match is None:
msg = 'Please check url {!r}. Missing http(s)://'.format(value)
raise DCOSException(msg)
else:
raise DCOSException(
'Unable to parse {!r} as a url'.format(value))
else:
return value
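# --- Illustrative usage sketch (not part of the original module) ---
# The schema below is a hypothetical example, assumed only to show how
# parse_json_item() splits a 'key=value' pair and coerces the value to the
# type declared in the JSON schema.
if __name__ == '__main__':
    _example_schema = {
        'properties': {
            'cpus': {'type': 'number'},
            'cmd': {'type': 'string'},
            'env': {'type': 'object'},
        }
    }
    # ('"cpus"', 1.5) -- numbers are parsed with float()
    print(parse_json_item('cpus=1.5', _example_schema))
    # ('"cmd"', 'sleep 100') -- surrounding quotes are stripped by clean_value()
    print(parse_json_item('cmd="sleep 100"', _example_schema))
    # ('"env"', ...) -- objects are decoded into a dict via json.loads()
    print(parse_json_item('env={"KEY": "VALUE"}', _example_schema))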
|
argilo/bbhn-utils
|
nodeinfo.py
|
Python
|
gpl-3.0
| 1,671
| 0.000598
|
#!/usr/bin/env python3
# Copyright 2017-2018 Clayton Smith
#
# This file is part of bbhn-utils
#
# bbhn-utils is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# bbhn-utils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with bbhn-utils; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
from flask import Flask, render_template, Response
import json
from datetime import *
from node_db import NodeDB
DISPLAY_HOURS = 24
node_db = NodeDB()
app = Flask(__name__, static_url_path='/static')
@app.route('/')
def node_info():
last_seen = node_db.last_seen(DISPLAY_HOURS)
return render_template('index.html', last_seen=last_seen)
@app.route('/link/<ip>.html')
def link_info(ip):
name = node_db.name(ip)
    neighbours = node_db.neighbours(ip, DISPLAY_HOURS)
for i, neighbour in enumerate(neighbours):
cost = node_db.cost_history(ip, neighbour[1], DISPLAY_HOURS)
cost = [(ts.timestamp() * 1000, lq) for ts, lq in cost]
neighbours[i] = neighbour + (json.dumps(cost),)
return render_template('link.html', ip=ip, name=name, neighbours=neighbours)
if __name__ == '__main__':
app.run(host='0.0.0.0')
node_db.close()
|
asedunov/intellij-community
|
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_0/_pkg1_1_1_0_0/_mod1_1_1_0_0_1.py
|
Python
|
apache-2.0
| 128
| 0.007813
|
name1_1_1_0_0_1_0 = None
name1_1_1_0_0_1_1 = None
name1_1_1_0_0_1_2 = None
name1_1_1_0_0_1_3 = None
name1_1_1_0_0_1_4 = None
|
teythoon/Insekta
|
docs/conf.py
|
Python
|
mit
| 7,761
| 0.007473
|
# -*- coding: utf-8 -*-
#
# Insekta documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 24 16:48:19 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Insekta'
copyright = u'2011, Insekta team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Insektadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Insekta.tex', u'Insekta Documentation',
u'Insekta team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'insekta', u'Insekta Documentation',
[u'Insekta team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Insekta', u'Insekta Documentation',
u'Insekta team', 'Insekta', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
RoboPi-CSEDU/rupai
|
Adafruit_Python_SSD1306/examples/shapes.py
|
Python
|
mit
| 4,281
| 0.002569
|
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Raspberry Pi pin configuration:
RST = 24
# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# Beaglebone Black pin configuration:
# RST = 'P9_12'
# Note the following are only used with SPI:
# DC = 'P9_15'
# SPI_PORT = 1
# SPI_DEVICE = 0
# 128x32 display with hardware I2C:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
# 128x64 display with hardware I2C:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
# Note you can change the I2C address by passing an i2c_address parameter like:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)
# Alternatively you can specify an explicit I2C bus number, for example
# with the 128x32 display you would use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)
# 128x32 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# 128x64 display with hardware SPI:
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# Alternatively you can specify a software SPI implementation by providing
# digital GPIO pin numbers for all the required display pins. For example
# on a Raspberry Pi with the 128x32 display you might use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)
# Initialize library.
disp.begin()
# Clear display.
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
# Draw an ellipse.
draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
x += shape_width+padding
# Draw a rectangle.
draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
x += shape_width+padding
# Draw a triangle.
draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
x += shape_width+padding
# Draw an X.
draw.line((x, bottom, x+shape_width, top), fill=255)
draw.line((x, top, x+shape_width, bottom), fill=255)
x += shape_width+padding
# Load default font.
font = ImageFont.load_default()
# Alternatively load a TTF font. Make sure the .ttf font file is in the same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
#font = ImageFont.truetype('Minecraftia.ttf', 8)
# Write two lines of text.
draw.text((x, top), 'Hello', font=font, fill=255)
draw.text((x, top+20), 'World!', font=font, fill=255)
# Display image.
disp.image(image)
disp.display()
|
bvalot/panISa
|
lib/clipread.py
|
Python
|
gpl-3.0
| 2,634
| 0.007976
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##Copyright (c) 2017 Benoit Valot and Panisa Treepong
##benoit.valot@univ-fcomte.fr
##UMR 6249 Chrono-Environnement, Besançon, France
##Licence GPL
from . import variables
class ClipRead():
"""Clip read object"""
def __init__(self, alignedsegment):
self.read_seq = alignedsegment.query_sequence
self.read_name = alignedsegment.query_name
self.read_start = alignedsegment.query_alignment_start #0 left
self.read_end = alignedsegment.query_alignment_end #exclusive
self.read_len = alignedsegment.query_alignment_length
self.ref_start = alignedsegment.reference_start #0 left
self.ref_end = alignedsegment.reference_end # exclusive
self.ref_len = alignedsegment.reference_length
self.cigar = alignedsegment.cigarstring
self.cigartuples = alignedsegment.cigartuples
self.isreverse = alignedsegment.is_reverse
def isstartclip(self):
"""Test if the read is start or end clip, look at """
if self.cigartuples is None:
raise Exception("ClipRead must be aligned")
if self.cigartuples[0][0] in variables.cigarclip:
return True
elif self.cigartuples[-1][0] in variables.cigarclip:
return False
else:
raise Exception("ClipRead must contain clip part at start or end")
def getdr(self, drstart, drend):
"""Return the dr sequence if complete or return None"""
s = self.read_start + (drstart - self.ref_start) ##if < 0, incomplet dr
if s < 0:
return None
e = self.read_end - (self.ref_end - drend)
if e > len(self.read_seq):
return None
return self.read_seq[s:e]
def getclippos(self):
"""Return the position of the clip"""
if self.isstartclip():
return self.ref_start
else:
return self.ref_end
def getclipseq(self):
"""return clip part of the read, except for hard clip return None"""
if len(self.read_seq) == self.read_len:
return None
if self.isstartclip():
return self.read_seq[:self.read_start]
else:
return self.read_seq[self.read_end:]
def __len__(self):
return len(self.read_seq)
    def __repr__(self):
return self.read_seq
def __str__(self):
        return str(self.ref_start) + ": " + str(self.read_start) + self.read_seq + \
               str(self.read_end) + " :" + str(self.ref_end)
if __name__=='__main__':
import doctest
doctest.testmod()
|
frownless/EMUstack
|
backend/mode_calcs.py
|
Python
|
gpl-3.0
| 13,371
| 0.004712
|
"""
mode_calcs.py is a subroutine of EMUstack that contains methods to
calculate the modes of a given layer, either analytically
(class 'Anallo') or from the FEM routine (class 'Simmo').
Copyright (C) 2013 Bjorn Sturmberg, Kokou Dossou, Felix Lawrence
EMUstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import sys
from scipy import sqrt
sys.path.append("../backend/")
import materials
import objects
from Fortran import EMUstack
_interfaces_i_have_known = {}
pi = np.pi
class Modes(object):
""" Super-class from which Simmo and Anallo inherit common functionality"""
def k_pll_norm(self):
return self.light.k_pll * self.structure.period
def wl_norm(self):
wl = float(self.light.wl_nm) / self.structure.period
if self.light.wl_nm % self.structure.period == 0:
wl += 1e-15
return wl
def air_ref(self):
""" Return an :Anallo: for air for the same :Light: as this."""
return self.light._air_ref(self.structure.period)
def calc_grating_orders(self, max_order):
""" Return the grating order indices px and py, unsorted."""
# Create arrays of grating order indexes (-p, ..., p)
pxs = pys = np.arange(-max_order, max_order + 1)
# The inner loop in the fortran is over y, not x
# So we call meshgrid with y first
pys_mesh, pxs_mesh = np.meshgrid(pys, pxs)
# Which elements of pys_mesh and pxs_mesh correspond to
# orders low enough that we're interested in?
low_ord = (pxs_mesh**2 + pys_mesh**2 <= max_order**2)
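        # Illustrative example (values assumed, not from the original source):
        # with max_order=1 this keeps the five orders inside the unit circle,
        # i.e. px = [-1, 0, 0, 0, 1] and py = [0, -1, 0, 1, 0].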
return pxs_mesh[low_ord], pys_mesh[low_ord]
def prop_fwd(self, height_norm):
""" Return the matrix P corresponding to forward propagation/decay"""
return np.mat(np.diag(np.exp(1j * self.k_z * height_norm)))
def __del__(self):
# Clean up _interfaces_i_have_known to avoid memory leak
        if _interfaces_i_have_known is not None:
            for key in list(_interfaces_i_have_known.keys()):
if id(self) in key:
_interfaces_i_have_known.pop(key)
class Anallo(Modes):
""" Like a :Simmo:, but for a thin film, and calculated analytically."""
def __init__(self, thin_film, light):
self.structure = thin_film
self.light = light
self.max_order_PWs = light.max_order_PWs
self.is_air_ref = False
def calc_modes(self):
#TODO: switch to just using calc_kz()?
kzs = self.calc_kz()
self.k_z = np.append(kzs, kzs) # add 2nd polarisation
self.structure.num_pw_per_pol = len(kzs)
def calc_kz(self):
""" Return a sorted 1D array of grating orders' kz."""
d = 1 #TODO: are lx, ly relevant here??
# Calculate vectors of pxs and pys of all orders
# with px^2 + py^2 <= self.max_order_PWs
pxs, pys = self.calc_grating_orders(self.max_order_PWs)
# Calculate k_x and k_y components of scattered PWs
# (using the grating equation)
alpha0, beta0 = self.k_pll_norm()
alphas = alpha0 + pxs * 2 * pi / d
betas = beta0 + pys * 2 * pi / d
k_z_unsrt = sqrt(self.k()**2 - alphas**2 - betas**2)
if self.is_air_ref:
assert not hasattr(self, 'sort_order'), \
"Are you sure you want to reset the sort_order?"
# Sort the modes from propagating to fastest decaying
# k_z is real for propagating waves
# This must be done consistently
s = np.argsort(-1*k_z_unsrt.real + k_z_unsrt.imag)
self.sort_order = s
else:
s = self.air_ref().sort_order
assert s.shape == k_z_unsrt.shape, (s.shape,
k_z_unsrt.shape)
# Find element of k_z_unsrt corresponding to zeroth order
self.specular_order = np.nonzero((pxs[s] == 0) * (pys[s] == 0))[0][0]
# Calculate number of propagating plane waves in thin film
self.num_prop_pw_per_pol = (k_z_unsrt.imag == 0).sum()
return k_z_unsrt[s]
def n(self):
if self.structure.loss:
return self.structure.material.n(self.light.wl_nm)
else:
return self.structure.material.n(self.light.wl_nm).real
def k(self):
""" Return the normalised wavenumber in the background material"""
return 2 * pi * self.n() / self.wl_norm()
def Z(self):
""" Return the wave impedance as a 1D array."""
# Zcr is relative characteristic impedance Zc / Z0
# Zcr = 1/n assumes that relative permeability is 1
# Otherwise, use Zcr = \sqrt(epsilon_r / mu_r)
Zcr = 1./self.n()
# self.k_z repeats itself halfway through
# First half is for TE pol, second is for TM
num_pw2 = len(self.k_z) / 2
k_z = self.k_z[:num_pw2]
assert (k_z == self.k_z[num_pw2:]).all()
# Calculate the (relative) wave impedances Z
# TE (E in interface plane): Z = Zcr * k/k_z
# TM (H in interface plane): Z = Zcr / (k/k_z)
k_on_kz = self.k() / k_z
# TE is always represented first
return np.concatenate((Zcr * k_on_kz, Zcr / k_on_kz))
def specular_incidence(self, pol = 'TE'):
""" Return a vector of plane wave amplitudes corresponding
to specular incidence in the specified polarisation.
i.e. all elements are 0 except the zeroth order.
"""
# Element corresponding to 0th order, TE
spec_TE = self.specular_order
# Element corresponding to 0th order, TM
spec_TM = self.specular_order + self.structure.num_pw_per_pol
tot_num_pw = self.structure.num_pw_per_pol * 2
inc_amp = np.mat(np.zeros(tot_num_pw, dtype='complex128')).T
if 'TE' == pol:
inc_amp[spec_TE] = 1
elif 'TM' == pol:
inc_amp[spec_TM] = 1
elif 'R Circ' == pol:
inc_amp[spec_TE] = 1/sqrt(2.)
inc_amp[spec_TM] = +1j/sqrt(2.)
elif 'L Circ' == pol:
inc_amp[spec_TE] = 1/sqrt(2.)
inc_amp[spec_TM] = -1j/sqrt(2.)
else:
raise NotImplementedError, \
"Must select from the currently implemented polarisations; \
TE, TM, R Circ, L Circ."
return inc_amp
class Simmo(Modes):
"""docstring for Simmo"""
def __init__(self, structure, light):
self.structure = structure
self.light = light
self.max_order_PWs = light.max_order_PWs
self.prop_consts = None
self.mode_pol = None
def calc_modes(self, num_BM, delete_working = True):
""" Run the FEM in Fortran """
st = self.structure
wl = self.light.wl_nm
if self.structure.diameter2 == 0:
nb_typ_el = 2
else:
nb_typ_el = 3
n_effs = np.array([st.background.n(wl), st.inclusion_a.n(wl),
st.inclusion_b.n(wl)])
n_effs = n_effs[:nb_typ_el]
if self.structure.loss == False:
n_effs = n_effs.real
pxs, pys = self.calc_grating_orders(self.max_order_PWs)
num_pw_per_pol = pxs.size
self.num_BM = num_BM
        assert num_BM > num_pw_per_pol * 2, "You must include at least as many BMs as PWs."
|
moteloquito/final-project
|
final/apps/fondo/views.py
|
Python
|
gpl-3.0
| 5,222
| 0.003255
|
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.core import serializers
from django.core.paginator import Paginator, EmptyPage
from django.core.serializers.json import DjangoJSONEncoder
from django.http.response import HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404, render_to_response, render
from django.template import RequestContext
from django.utils import simplejson
from django.http import Http404
from django.template import TemplateDoesNotExist
# from django.views.generic.simple import direct_to_template
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from final.apps.fondo.domain.serializers import FondoSerializer, TicketSerializer, QuerySetSerializer
from final.apps.fondo.models import PettyCash, Ticket
import logging
_logger = logging.getLogger(__name__)
def main(request):
u = request.user
fondos = []
# fondos = u.customuser.pettycash_set.all()
return render_to_response('fondo/main.html', {'fondos': fondos})
def get_fondo_status(request, fondo_id):
""" Gets the current status.
"""
fondo = get_object_or_404(PettyCash, pk=fondo_id)
    submited = fondo.ticket_set.filter(status='SUBM')
    aproved = fondo.ticket_set.filter(status='OPEN')
total_submited = 0.0
total_aproved = 0.0
if submited:
total_submited = sum(t.value for t in submited)
if aproved:
total_aproved = sum(t.value for t in aproved)
print ("Status: submited %s, aproved %s" % (total_submited, total_aproved))
data = {}
data['submited'] = total_submited
data['aproved'] = total_aproved
return HttpResponse(simplejson.dumps(data, cls=DjangoJSONEncoder), mimetype='application/json')
def get_ticket_for_fondo(request, fondo_id):
page = 1
size = 5
if request.POST.get('page'):
page = request.POST['page']
if request.POST.get('size'):
size = request.POST['size']
_logger.debug("Page: %s, size: %s" % (page, size))
fondo = get_object_or_404(PettyCash, pk=fondo_id)
tickets = fondo.ticket_set.all()
p = Paginator(tickets, size)
try:
pag = p.page(page)
# tickets = QuerySetSerializer().serialize(pag)
tickets = []
for t in pag:
ticket = {}
ticket['id'] = str(t.id)
ticket['value'] = str(t.value)
ticket['description'] = str(t.description)
ticket['date'] = str(t.date)
tickets.append(ticket)
pagination = {}
pagination['has_previous'] = pag.has_previous()
pagination['has_next'] = pag.has_next()
pagination['page'] = page
pagination['size'] = size
data = {}
data['tickets'] = tickets
data['pagination'] = pagination
# data = simplejson.dumps(pagination)
# tickets = serializers.serialize('json', p.page(page))
except EmptyPage:
return HttpResponse({'error': 'Object is not your own'}, status=status.HTTP_404_NOT_FOUND, mimetype='application/json')
return HttpResponse(simplejson.dumps(data), mimetype='application/json')
class FondoViewSet(viewsets.ModelViewSet):
queryset = PettyCash.objects.all()
serializer_class = FondoSerializer
def list(self, request):
if self.is_superuser(request):
q = PettyCash.objects.all()
else:
q = request.user.fondo_set.all()
return Response(FondoSerializer(q).data)
def retrieve(self, request, pk=None):
try:
f = PettyCash.objects.get(pk=pk)
except PettyCash.DoesNotExist:
return Response({'error': 'Not found'}, status=status.HTTP_404_NOT_FOUND)
serializer = FondoSerializer(f)
if self.is_superuser(request) or f.owner.id == request.user.id:
return Response(serializer.data)
return Response({'error': 'Object is not your own'}, status=status.HTTP_404_NOT_FOUND)
def is_superuser(self, request):
"""
Indicates if user is a superuser
"""
if hasattr(request, 'user') and request.user.is_superuser:
return True
return False
class TicketViewSet(viewsets.ModelViewSet):
queryset = Ticket.objects.all()
serializer_class = TicketSerializer
def list(self, request):
status = request.GET.get('status')
fondo_id = request.GET.get('fondo')
_logger.debug("Getting tickets for fondo %s and status %s" % (fondo_id, status))
user = request.user
fondo = get_object_or_404(PettyCash, pk=fondo_id)
if status is not None:
q = Ticket.objects.filter(fondo=fondo, status=status)
else:
q = Ticket.objects.filter(fondo=fondo)
return Response(TicketSerializer(q).data)
def template_pages(request, page):
try:
template_name = "template/%s.html" % page
return render_to_response(template_name, {}, context_instance=RequestContext(request))
except TemplateDoesNotExist:
raise Http404()
|
Wang-Sen/nqzx-backend
|
bootcamp/app/views.py
|
Python
|
gpl-3.0
| 11,441
| 0.007145
|
# -*- coding: utf8 -*-
from django.conf import settings
from django.contrib import auth
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from bootcamp.decorators import ajax_required
from registration.users import UserModel
from django.contrib.auth.models import User
from bootcamp.feeds.models import Feed
from django.core.context_processors import csrf
from django.template.loader import render_to_string
from django.shortcuts import render, redirect, get_object_or_404
import random
import json
FEEDS_NUM_PAGES = 20
MAJOR_VERSION = 0
MID_VERSION = 1
MIN_VERSION = 3
NOTE = """
更新内容:
1. 删除评论、帖子,取消赞扣分以防刷经验;
2. 增加修改资料功能;
"""
URL = "http://nqzx.net/media/ads/nqzx.apk"
def check_version(version):
ret = False
ls = version.split('.')
if MAJOR_VERSION > int(ls[0]):
ret = True
elif MID_VERSION > int(ls[1]):
ret = True
elif MIN_VERSION > int(ls[2]):
ret = True
else:
ret = False
return ret
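# Illustrative behaviour of check_version (values assumed, shown for clarity):
# with the server at 0.1.3, check_version("0.1.2") returns True (an update is
# available) while check_version("0.1.3") returns False.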
def get_level(reputation):
if not reputation:
        return 1
if reputation < 5:
return 1
elif reputation < 15:
return 2
elif reputation < 30:
return 3
elif reputation < 50:
return 4
elif reputation < 100:
return 5
elif reputation < 200:
return 6
elif reputation < 500:
return 7
elif reputation < 1000:
return 8
elif reputation < 2000:
return 9
elif reputation < 3000:
return 10
elif reputation < 6000:
return 11
elif reputation < 10000:
return 12
elif reputation < 18000:
return 13
elif reputation < 30000:
return 14
elif reputation < 60000:
return 15
elif reputation < 100000:
return 16
elif reputation < 300000:
return 17
else:
return 18
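# Illustrative mapping (values assumed for clarity): get_level(None) -> 1,
# get_level(120) -> 6, get_level(250000) -> 17.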
@require_POST
@ajax_required
def login(request):
username = request.POST.get('account')
password = request.POST.get('password')
result = {"status": False, "data":""}
if not username or not password:
result = {"status": False, "data":"未收到用户名或密码!"}
return HttpResponse(json.dumps(result), content_type="application/json")
if username=="" or username.isspace():
result = {"status": False, "data":"用户名不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if password=="" or password.isspace():
result = {"status": False, "data":"密码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
user = auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
auth.login(request, user)
result = {"status": True, "data": {"id": user.id, "email": user.email, \
"location": user.profile.location, "mobile": user.profile.mobile, "reputation": \
user.profile.reputation,"signdate": user.profile.signdate}}
else:
result = {"status": False, "data":"["+username+"]已被暂时禁用"}
else:
result = {"status": False, "data":"用户名或密码不正确,请重试"}
return HttpResponse(json.dumps(result), content_type="application/json")
@require_POST
@ajax_required
def reg(request):
username = request.POST.get('account')
password = request.POST.get('password')
email = request.POST.get('email')
result = {"status": False, "data":""}
if not username or not password or not email:
result = {"status": False, "data":"未收到用户名、密码或者用户名!"}
return HttpResponse(json.dumps(result), content_type="application/json")
if username=="" or username.isspace():
result = {"status": False, "data":"用户名不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if password=="" or password.isspace():
result = {"status": False, "data":"密码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if email=="" or email.isspace():
result = {"status": False, "data":"邮箱不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
# clean data
existing = UserModel().objects.filter(username__iexact=username)
if existing.exists():
result = {"status": False, "data":"用户名已经存在"}
return HttpResponse(json.dumps(result), content_type="application/json")
if UserModel().objects.filter(email__iexact=email):
result = {"status": False, "data":"邮箱已经存在"}
return HttpResponse(json.dumps(result), content_type="application/json")
user = UserModel().objects.create_user(username, email, password)
user.is_active = True
    user.save()
result = {"status": True, "data": {"id": user.id, "email": user.email, \
"location": user.profile.location, "mobile": user.profile.mobile, "reputation": \
user.profile.reputation,"signdate": user.profile.signdate}}
return HttpResponse(json.dumps(result), content_type="application/json")
@require_POST
@ajax_required
def get_state(request):
user = request.user
state = {"id": user.id, "u
|
sername": user.username, "email": user.email, "location": user.profile.location, \
"mobile": user.profile.mobile, "reputation": user.profile.reputation,"first_name": user.first_name, \
"sex": user.profile.sex,"signdate": user.profile.signdate}
return HttpResponse(json.dumps(state), content_type="application/json")
@require_POST
@ajax_required
def set_state(request):
result = {"status": False, "data": {}}
userid = request.POST.get('userid')
user = User.objects.get(pk=userid)
if not user:
        return HttpResponse(json.dumps(result), content_type="application/json")
first_name = request.POST.get('first_name')
location = request.POST.get('location')
mobile = request.POST.get('mobile')
reputation = request.POST.get('reputation')
sex = request.POST.get('sex')
signdate = request.POST.get('signdate')
if first_name:
        user.first_name = first_name
if location:
user.profile.location = location
if mobile:
user.profile.mobile = mobile
if reputation:
user.profile.reputation = reputation
if sex:
user.profile.sex = sex
if signdate:
user.profile.signdate = signdate
user.save()
result = {"status": True, "data": {"first_name": first_name, "sex": sex, \
"location":location,"mobile":mobile,"reputation":reputation,"signdate":signdate}}
return HttpResponse(json.dumps(result), content_type="application/json")
def get_feeds(request):
page = 1
feed_id = request.POST["feed_id"]
csrf_token = unicode(csrf(request)['csrf_token'])
html = u''
if feed_id:
feed = Feed.objects.get(pk=feed_id)
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': request.user,
'csrf_token': csrf_token,
'lvl': get_level(feed.user.profile.reputation),
})
)
else:
feeds = Feed.get_feeds()
paginator = Paginator(feeds, FEEDS_NUM_PAGES)
feeds = paginator.page(page)
for feed in feeds:
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': request.user,
'csrf_token': csrf_token,
'lvl': get_level(feed.user.profile.reputation),
})
)
return HttpResponse(html)
@ajax_required
def checkupdate(request):
version = request.POST.get('version')
ret = {"status": check_version(version), "note": NOTE, "url": URL}
return HttpResponse(json.dumps(ret), content_type="application/json")
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
feeds = Feed.get_feeds_after(last_feed)
if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
|
oVirt/vdsm
|
lib/vdsm/host/__init__.py
|
Python
|
gpl-2.0
| 2,088
| 0.000479
|
#
# Copyright 2008-2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
import os
import logging
from vdsm.common import cpuarch
from vdsm.common.cache import memoized
from vdsm import dmidecodeUtil
P_VDSM_NODE_ID = '/etc/vdsm/vdsm.id'
@memoized
def uuid():
host_UUID = None
try:
if os.path.exists(P_VDSM_NODE_ID):
with open(P_VDSM_NODE_ID) as f:
host_UUID = f.readline().replace("\n", "")
else:
arch = cpuarch.real()
if cpuarch.is_x86(arch):
try:
hw_info = dmidecodeUtil.getHardwareInfoStructure()
host_UUID = hw_info['systemUUID'].lower()
except KeyError:
logging.warning('Could not find host UUID.')
elif cpuarch.is_ppc(arch):
# eg. output IBM,03061C14A
try:
with open('/proc/device-tree/system-id') as f:
systemId = f.readline()
host_UUID = systemId.rstrip('\0').replace(',', '')
except IOError:
logging.warning('Could not find host UUID.')
except:
logging.error("Error retrieving host UUID", exc_info=True)
return host_UUID
|
ryanpdwyer/newtex
|
newtex/tests/test_git.py
|
Python
|
mit
| 116
| 0
|
import unittest
class TestExample(unittest.TestCase):
def test_example(self):
self.assertEquals(0, 0)
|
lemoncoin/lemoncoin
|
contrib/spendfrom/spendfrom.py
|
Python
|
mit
| 10,043
| 0.005775
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("
|
20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default
|
location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 13338 if testnet else 3338
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
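# Illustrative call of select_coins (hypothetical inputs, shown for clarity):
# select_coins(Decimal("1.0"),
#              [{"txid": "aa", "vout": 0, "amount": Decimal("0.6")},
#               {"txid": "bb", "vout": 1, "amount": Decimal("0.5")}])
# returns ([{"txid": "aa", "vout": 0}, {"txid": "bb", "vout": 1}], Decimal("0.1")),
# i.e. both outputs are selected and the change is 0.1.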
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/sympy/solvers/tests/test_solvers.py
|
Python
|
mit
| 58,517
| 0.002956
|
from sympy import (
Abs, And, Derivative, Dummy, Eq, Float, Function, Gt, I, Integral,
LambertW, Lt, Matrix, Or, Piecewise, Poly, Q, Rational, S, Symbol,
Wild, acos, asin, atan, atanh, cos, cosh, diff, erf, erfinv, erfc,
erfcinv, erf2, erf2inv, exp, expand, im, log, pi, re, sec, sin,
sinh, solve, solve_linear, sqrt, sstr, symbols, sympify, tan, tanh,
root, simplify, atan2, arg, Mul, SparseMatrix, ask, Tuple, nsolve, oo)
from sympy.core.function import nfloat
from sympy.solvers import solve_linear_system, solve_linear_system_LU, \
solve_undetermined_coeffs
from sympy.solvers.solvers import _invert, unrad, checksol, posify, _ispow, \
det_quick, det_perm, det_minor
from sympy.physics.units import cm
from sympy.polys.rootoftools import RootOf
from sympy.utilities.pytest import slow, XFAIL, raises, skip, ON_TRAVIS
from sympy.utilities.randtest import verify_numerically as tn
from sympy.abc import a, b, c, d, k, h, p, x, y, z, t, q, m
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_swap_back():
f, g = map(Function, 'fg')
fx, gx = f(x), g(x)
assert solve([fx + y - 2, fx - gx - 5], fx, y, gx) == \
{fx: gx + 5, y: -gx - 3}
assert solve(fx + gx*x - 2, [fx, gx]) == {fx: 2, gx: 0}
assert solve(fx + gx**2*x - y, [fx, gx]) == [{fx: y - gx**2*x}]
assert solve([f(1) - 2, x + 2]) == [{x: -2, f(1): 2}]
def guess_solve_strategy(eq, symbol):
try:
solve(eq, symbol)
return True
except (TypeError, NotImplementedError):
return False
def test_guess_poly():
# polynomial equations
assert guess_solve_strategy( S(4), x ) # == GS_POLY
assert guess_solve_strategy( x, x ) # == GS_POLY
assert guess_solve_strategy( x + a, x ) # == GS_POLY
assert guess_solve_strategy( 2*x, x ) # == GS_POLY
assert guess_solve_strategy( x + sqrt(2), x) # == GS_POLY
assert guess_solve_strategy( x + 2**Rational(1, 4), x) # == GS_POLY
assert guess_solve_strategy( x**2 + 1, x ) # == GS_POLY
assert guess_solve_strategy( x**2 - 1, x ) # == GS_POLY
assert guess_solve_strategy( x*y + y, x ) # == GS_POLY
assert guess_solve_strategy( x*exp(y) + y, x) # == GS_POLY
assert guess_solve_strategy(
(x - y**3)/(y**2*sqrt(1 - y**2)), x) # == GS_POLY
def test_guess_poly_cv():
# polynomial equations via a change of variable
assert guess_solve_strategy( sqrt(x) + 1, x ) # == GS_POLY_CV_1
assert guess_solve_strategy(
x**Rational(1, 3) + sqrt(x) + 1, x ) # == GS_POLY_CV_1
assert guess_solve_strategy( 4*x*(1 - sqrt(x)), x ) # == GS_POLY_CV_1
# polynomial equation multiplying both sides by x**n
assert guess_solve_strategy( x + 1/x + y, x ) # == GS_POLY_CV_2
def test_guess_rational_cv():
# rational functions
assert guess_solve_strategy( (x + 1)/(x**2 + 2), x) # == GS_RATIONAL
assert guess_solve_strategy(
(x - y**3)/(y**2*sqrt(1 - y**2)), y) # == GS_RATIONAL_CV_1
# rational functions via the change of variable y -> x**n
assert guess_solve_strategy( (sqrt(x) + 1)/(x**Rational(1, 3) + sqrt(x) + 1), x ) \
#== GS_RATIONAL_CV_1
def test_guess_transcendental():
#transcendental functions
assert guess_solve_strategy( exp(x) + 1, x ) # == GS_TRANSCENDENTAL
    assert guess_solve_strategy( 2*cos(x) - y, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(
exp(x) + exp(-x) - y, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(3**x - 10, x) # == GS_TRANSCENDENTAL
    assert guess_solve_strategy(-3**x + 10, x) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(a*x**b - y, x) # == GS_TRANSCENDENTAL
def test_solve_args():
# equation container, issue 5113
ans = {x: -3, y: 1}
eqs = (x + 5*y - 2, -3*x + 6*y - 15)
assert all(solve(container(eqs), x, y) == ans for container in
(tuple, list, set, frozenset))
assert solve(Tuple(*eqs), x, y) == ans
# implicit symbol to solve for
assert set(solve(x**2 - 4)) == set([S(2), -S(2)])
assert solve([x + y - 3, x - y - 5]) == {x: 4, y: -1}
assert solve(x - exp(x), x, implicit=True) == [exp(x)]
# no symbol to solve for
assert solve(42) == []
assert solve([1, 2]) == []
# duplicate symbols removed
assert solve((x - 3, y + 2), x, y, x) == {x: 3, y: -2}
# unordered symbols
# only 1
assert solve(y - 3, set([y])) == [3]
# more than 1
assert solve(y - 3, set([x, y])) == [{y: 3}]
# multiple symbols: take the first linear solution
assert solve(x + y - 3, [x, y]) == [{x: 3 - y}]
# unless it is an undetermined coefficients system
assert solve(a + b*x - 2, [a, b]) == {a: 2, b: 0}
assert solve(a*x**2 + b*x + c -
((x - h)**2 + 4*p*k)/4/p,
[h, p, k], exclude=[a, b, c], dict=True) == \
[{k: c - b**2/(4*a), h: -b/(2*a), p: 1/(4*a)}]
# failing undetermined system
assert solve(a*x + b**2/(x + 4) - 3*x - 4/x, a, b) == \
[{a: (-b**2*x + 3*x**3 + 12*x**2 + 4*x + 16)/(x**2*(x + 4))}]
# failed single equation
assert solve(1/(1/x - y + exp(y))) == []
raises(
NotImplementedError, lambda: solve(exp(x) + sin(x) + exp(y) + sin(y)))
# failed system
# -- when no symbols given, 1 fails
assert solve([y, exp(x) + x]) == [{x: -LambertW(1), y: 0}]
# both fail
assert solve(
(exp(x) - x, exp(y) - y)) == [{x: -LambertW(-1), y: -LambertW(-1)}]
# -- when symbols given
solve([y, exp(x) + x], x, y) == [(-LambertW(1), 0)]
# symbol is a number
assert solve(x**2 - pi, pi) == [x**2]
# no equations
assert solve([], [x]) == []
# overdetermined system
# - nonlinear
assert solve([(x + y)**2 - 4, x + y - 2]) == [{x: -y + 2}]
# - linear
assert solve((x + y - 2, 2*x + 2*y - 4)) == {x: -y + 2}
def test_solve_polynomial1():
assert solve(3*x - 2, x) == [Rational(2, 3)]
assert solve(Eq(3*x, 2), x) == [Rational(2, 3)]
assert set(solve(x**2 - 1, x)) == set([-S(1), S(1)])
assert set(solve(Eq(x**2, 1), x)) == set([-S(1), S(1)])
assert solve(x - y**3, x) == [y**3]
assert set(solve(x - y**3, y)) == set([
(-x**Rational(1, 3))/2 + I*sqrt(3)*x**Rational(1, 3)/2,
x**Rational(1, 3),
(-x**Rational(1, 3))/2 - I*sqrt(3)*x**Rational(1, 3)/2,
])
a11, a12, a21, a22, b1, b2 = symbols('a11,a12,a21,a22,b1,b2')
assert solve([a11*x + a12*y - b1, a21*x + a22*y - b2], x, y) == \
{
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21),
y: (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
}
solution = {y: S.Zero, x: S.Zero}
assert solve((x - y, x + y), x, y ) == solution
assert solve((x - y, x + y), (x, y)) == solution
assert solve((x - y, x + y), [x, y]) == solution
assert set(solve(x**3 - 15*x - 4, x)) == set([
-2 + 3**Rational(1, 2),
S(4),
-2 - 3**Rational(1, 2)
])
assert set(solve((x**2 - 1)**2 - a, x)) == \
set([sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a))])
def test_solve_polynomial2():
assert solve(4, x) == []
def test_solve_polynomial_cv_1a():
"""
Test for solving on equations that can be converted to a polynomial equation
using the change of variable y -> x**Rational(p, q)
"""
assert solve( sqrt(x) - 1, x) == [1]
assert solve( sqrt(x) - 2, x) == [4]
assert solve( x**Rational(1, 4) - 2, x) == [16]
assert solve( x**Rational(1, 3) - 3, x) == [27]
assert solve(sqrt(x) + x**Rational(1, 3) + x**Rational(1, 4), x) == [0]
def test_solve_polynomial_cv_1b():
assert set(solve(4*x*(1 - a*sqrt(x)), x)) == set([S(0), 1/a**2])
assert set(solve(x * (x**(S(1)/3) - 3), x)) == set([S(0), S(27)])
def test_solve_polynomial_cv_2():
"""
Test for solving on equations that can be converted to a polynomial equation
multiplying both sides of the equation by x**m
"""
assert solve(x + 1/x - 1, x) in \
[[ Rational(1, 2) + I*sqrt(3)/2, Rational(1, 2) - I*sqrt(3)/2],
[ Rational(1, 2) - I*sqrt(3)/2, Rational(1, 2)
|
msettles/expHTS
|
expHTS/spadesCMD.py
|
Python
|
apache-2.0
| 3,121
| 0.006729
|
from validate_app import validateApp
import os
from distutils import spawn
import sys
from parse_files import parseOutHTseq, bringTogether
from bashSub import bashSub
def checkPreprocessApplications():
applications = ["spades.py"]
source = ["http://bioinf.spbau.ru/spades"]
i = 0;
for app in applications:
if spawn.find_executable(app) is None:
sys.stderr.write("It doesn't look like you have app - " + app + "\n")
sys.stderr.write("Download it here - " + source[i] + "\n");
exit(0)
else:
sys.stderr.write(app + " found\n")
        i += 1
def returnReads(dictSampleSeqFiles):
SE = ""
PE1 = ""
PE2 = ""
# data struct
# { (sampleKey, seqKey) : [[SE], [SE], [PE1, PE2], [PE1, PE2]] }
# diving into each of the sub lists in the dictionary value key
for e in dictSampleSeqFiles:
        # if sublist only has one element then it is SE read
if len(e) == 1:
if SE == "":
SE = e[0]
else:
SE += "," + e[0]
else:
if PE1 == "":
PE1 = e[0]
PE2 = e[1]
else:
                PE1 += "," + e[0]
PE2 += "," + e[1]
return [SE, PE1, PE2]
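# Editorial sketch (not part of the original script; file names below are made up):
# given sublists such as [["s1_SE.fastq"]] and [["s1_R1.fastq", "s1_R2.fastq"]],
# returnReads joins each read type with commas, e.g.
#   returnReads([["s1_SE.fastq"], ["s1_R1.fastq", "s1_R2.fastq"]])
#   -> ["s1_SE.fastq", "s1_R1.fastq", "s1_R2.fastq"]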
def check_dir(Dir):
if not os.path.exists(Dir):
os.mkdir(Dir)
class spadesCMD:
def __init__(self):
self.metaDataFolder = "MetaData"
def execute(self, args):
time = 0
        checkPreprocessApplications()
logFiles = []
# checkPreprocessApplications()
validate = validateApp()
validate.setValidation(True)
dictSampleSeqFiles = validate.validateSampleSheet(args.readFolder, args.spadesFolder, args.samplesFile, args.force, True)
#keys tuple 0 location being input folder
#1 location being output folder location
for keys in dictSampleSeqFiles.keys():
check_dir(args.spadesFolder)
check_dir(keys[1])
terminal = []
#countFile = os.path.join(keys[1], keys[0].split("/")[-1]) + ".counts"
print dictSampleSeqFiles[keys]
if (len(dictSampleSeqFiles[keys][1]) == 3):
terminal.append(bashSub("spades.py", dictSampleSeqFiles[keys][1], ['-1', '-2', '-s'], " --careful -t " + args.threads + " -o " + keys[1] + " -m " + args.memory, ''))
elif (len(dictSampleSeqFiles[keys][1]) == 2):
terminal.append(bashSub("spades.py", dictSampleSeqFiles[keys][1], ['-1', '-2'], "--careful -t " + args.threads + " -o " + keys[1] + " -m " + args.memory, ''))
print terminal[-1].getCommand()
terminal[-1].runCmd("")
sys.stderr.flush()
#time += runSortByName.returnTime() + runView.returnTime() + htseqCmd.returnTime()
#logFiles.append(parseOutHTseq(keys[1], keys[1].split("/")[-1]))
#bringTogether(logFiles, os.path.join(args.finalDir, "Counts_Summary.log"))
print "Total amount of seconds to run all samples"
print "Seconds: " + str(time)
|
pmakahmann/CL_Project
|
CL_Project/settings.py
|
Python
|
mit
| 3,282
| 0.001219
|
"""
Django settings for CL_Project project.
Generated by 'django-admin startproject' using Django 1.9.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p*zbr4!vavk7^si=#d_w7vl-_lvd=3g_fpus-nrv&e^%+57fel'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
    'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'songs',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CL_Project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates', 'songs/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CL_Project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
|
jbuchbinder/youtube-dl
|
youtube_dl/extractor/prosiebensat1.py
|
Python
|
unlicense
| 17,378
| 0.002303
|
# coding: utf-8
from __future__ import unicode_literals
import re
from hashlib import sha1
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
determine_ext,
float_or_none,
int_or_none,
unified_strdate,
)
class ProSiebenSat1BaseIE(InfoExtractor):
def _extract_video_info(self, url, clip_id):
client_location = url
video = self._download_json(
'http://vas.sim-technik.de/vas/live/v2/videos',
            clip_id, 'Downloading videos JSON', query={
'access_token': self._TOKEN,
'client_location': client_location,
'client_name': self._CLIENT_NAME,
'ids': clip_id,
})[0]
if video.get('is_protected') is True:
raise ExtractorError('This video is DRM protected.', expected=True)
duration = float_or_none(video.get('duration'))
source_ids = [compat_str(source['id']) for source in video['sources']]
client_id = self._SALT[:2] + sha1(''.join([clip_id, self._SALT, self._TOKEN, client_location, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
sources = self._download_json(
'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources' % clip_id,
clip_id, 'Downloading sources JSON', query={
'access_token': self._TOKEN,
'client_id': client_id,
'client_location': client_location,
'client_name': self._CLIENT_NAME,
})
server_id = sources['server_id']
def fix_bitrate(bitrate):
bitrate = int_or_none(bitrate)
if not bitrate:
return None
return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate
formats = []
for source_id in source_ids:
client_id = self._SALT[:2] + sha1(''.join([self._SALT, clip_id, self._TOKEN, server_id, client_location, source_id, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
urls = self._download_json(
'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url' % clip_id,
clip_id, 'Downloading urls JSON', fatal=False, query={
'access_token': self._TOKEN,
'client_id': client_id,
'client_location': client_location,
'client_name': self._CLIENT_NAME,
'server_id': server_id,
'source_ids': source_id,
})
if not urls:
continue
if urls.get('status_code') != 0:
raise ExtractorError('This video is unavailable', expected=True)
urls_sources = urls['sources']
if isinstance(urls_sources, dict):
urls_sources = urls_sources.values()
for source in urls_sources:
source_url = source.get('url')
if not source_url:
continue
protocol = source.get('protocol')
mimetype = source.get('mimetype')
if mimetype == 'application/f4m+xml' or 'f4mgenerator' in source_url or determine_ext(source_url) == 'f4m':
formats.extend(self._extract_f4m_formats(
source_url, clip_id, f4m_id='hds', fatal=False))
elif mimetype == 'application/x-mpegURL':
formats.extend(self._extract_m3u8_formats(
source_url, clip_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
else:
tbr = fix_bitrate(source['bitrate'])
if protocol in ('rtmp', 'rtmpe'):
mobj = re.search(r'^(?P<url>rtmpe?://[^/]+)/(?P<path>.+)$', source_url)
if not mobj:
continue
path = mobj.group('path')
mp4colon_index = path.rfind('mp4:')
app = path[:mp4colon_index]
play_path = path[mp4colon_index:]
formats.append({
'url': '%s/%s' % (mobj.group('url'), app),
'app': app,
'play_path': play_path,
'player_url': 'http://livepassdl.conviva.com/hf/ver/2.79.0.17083/LivePassModuleMain.swf',
'page_url': 'http://www.prosieben.de',
'tbr': tbr,
'ext': 'flv',
'format_id': 'rtmp%s' % ('-%d' % tbr if tbr else ''),
})
else:
formats.append({
'url': source_url,
'tbr': tbr,
'format_id': 'http%s' % ('-%d' % tbr if tbr else ''),
})
self._sort_formats(formats)
return {
'duration': duration,
'formats': formats,
}
class ProSiebenSat1IE(ProSiebenSat1BaseIE):
IE_NAME = 'prosiebensat1'
IE_DESC = 'ProSiebenSat.1 Digital'
_VALID_URL = r'''(?x)
https?://
(?:www\.)?
(?:
(?:
prosieben(?:maxx)?|sixx|sat1(?:gold)?|kabeleins(?:doku)?|the-voice-of-germany|7tv|advopedia
)\.(?:de|at|ch)|
ran\.de|fem\.com|advopedia\.de
)
/(?P<id>.+)
'''
_TESTS = [
{
# Tests changes introduced in https://github.com/rg3/youtube-dl/pull/6242
# in response to fixing https://github.com/rg3/youtube-dl/issues/6215:
# - malformed f4m manifest support
# - proper handling of URLs starting with `https?://` in 2.0 manifests
# - recursive child f4m manifests extraction
'url': 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge',
'info_dict': {
'id': '2104602',
'ext': 'flv',
'title': 'Episode 18 - Staffel 2',
'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
'upload_date': '20131231',
'duration': 5845.04,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.prosieben.de/videokatalog/Gesellschaft/Leben/Trends/video-Lady-Umstyling-f%C3%BCr-Audrina-Rebekka-Audrina-Fergen-billig-aussehen-Battal-Modica-700544.html',
'info_dict': {
'id': '2570327',
'ext': 'mp4',
'title': 'Lady-Umstyling für Audrina',
'description': 'md5:4c16d0c17a3461a0d43ea4084e96319d',
'upload_date': '20131014',
'duration': 606.76,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Seems to be broken',
},
{
'url': 'http://www.prosiebenmaxx.de/tv/experience/video/144-countdown-fuer-die-autowerkstatt-ganze-folge',
'info_dict': {
'id': '2429369',
'ext': 'mp4',
'title': 'Countdown für die Autowerkstatt',
'description': 'md5:809fc051a457b5d8666013bc40698817',
'upload_date': '20140223',
'duration': 2595.04,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.sixx.de/stars-style/video/sexy-laufen-in-ugg-boots-clip',
'info_dict': {
'id': '2904997',
'ext': 'mp4',
'title': 'Sexy laufen in Ugg Boots',
|
ferreiro/Python-NoSQL-Security
|
assignment6/El_Coladero/coladero.py
|
Python
|
mit
| 2,952
| 0.006108
|
# -*- coding: utf-8 -*-
"""
@title: El Coladero
@description: Aplicación web para detectar y corregir vulnerabilidades
@author: Enrique Martín Martín
@email: emartinm@ucm.es
"""
from bottle import run, template, get, post, request
import sqlite3
@get('/show_all_questions')
def show_all_questions():
conn = sqlite3.connect("database.db")
cur = conn.cursor()
query = """SELECT author,title,time,tags,id
FROM Questions
ORDER BY time DESC"""
cur.execute(query)
    res = list(cur.fetchall())
conn.close()
return template('messages.html', questions=res)
@post('/insert_question')
def insert_question():
author = request.forms['author']
title = request.forms['title']
tags = request.forms['tags']
body = request.forms['body']
conn = sqlite3.connect("database.db")
cur = conn.cursor()
qbody = """INSERT INTO Questions(author, title, tags, body, time)
VALUES ('{0}','{1}','{2}','{3}',CURRENT_TIMESTAMP)"""
query = qbody.format(author, title, tags, body)
cur.executescript(query)
conn.commit()
conn.close()
return "Pregunta insertada con exito"
@get('/show_question')
def show_question():
ident = request.query['id']
conn = sqlite3.connect("database.db")
cur = conn.cursor()
qbody1 = """SELECT author,title,time,tags,body
FROM Questions
WHERE id={0}"""
qbody2 = """SELECT author,time,body
FROM Replies
WHERE question_id={0}"""
query1 = qbody1.format(ident)
query2 = qbody2.format(ident)
cur.execute(query1)
question = cur.fetchone()
cur.execute(query2)
replies = list(cur.fetchall())
conn.close()
return template("message_detail.html", q=question, replies=replies, ident=ident)
@post('/insert_reply')
def insert_reply():
author = request.forms['author']
body = request.forms['body']
question_id = request.forms['question_id']
conn = sqlite3.connect('database.db')
cur = conn.cursor()
qbody = """INSERT INTO Replies(author,body,time,question_id)
VALUES ('{0}', '{1}', CURRENT_TIMESTAMP, {2})"""
query = qbody.format(author, body, question_id)
cur.execute(query)
conn.commit()
conn.close()
return "Contestación insertada con éxito"
@get('/search_question')
def search_question():
tag = request.query['tag']
conn = sqlite3.connect('database.db')
cur = conn.cursor()
qbody = """SELECT author,title,time,tags
FROM Questions
WHERE tags LIKE '%{0}%'
ORDER BY time DESC"""
print tag
print qbody.format(tag)
query = qbody.format(tag)
cur.execute(query)
res = list(cur.fetchall())
conn.close()
return template('messages_search.html', questions=res, tag=tag)
if __name__ == "__main__":
run(host='localhost',port=8080,debug=True)
|
ricleal/reductionServer
|
src/data/messages.py
|
Python
|
gpl-3.0
| 1,661
| 0.022878
|
'''
Created on Sep 27, 2013
@author: leal
Default JSON MESSAGES
'''
import ast
import logging
import config.config
logger = logging.getLogger(__name__)
class Messages(object):
'''
classdocs
'''
messageTemplate = """{
'success' : '%r',
'message' : '%s',
'details' : %r
}"""
@staticmethod
def success(message,details=''):
messageAsStr = Messages.messageTemplate%(True,message,details)
logger.debug(messageAsStr)
messageAsDic = ast.literal_eval(messageAsStr)
return messageAsDic
@staticmethod
def error(message,details=''):
messageAsStr = Messages.messageTemplate%(False,message,details)
logger.debug(messageAsStr)
messageAsDic = ast.literal_eval(messageAsStr)
return messageAsDic
@staticmethod
def errorDetailed(message,complementaryMessage,value):
details = """{
'message' : %r,
'value' : %r
}"""%(complementaryMessage,value)
        messageAsStr = Messages.messageTemplate%(False,message,
                                                 ast.literal_eval(details))
logger.debug(messageAsStr)
messageAsDic = ast.literal_eval(messageAsStr)
return messageAsDic
if __name__ == '__main__':
Messages.success("OK")
Messages.success("OK", "File received")
Messages.error("Error")
Messages.error("Error",details='There was an error processing XPTO.')
Messages.error("Error adding X.",details={'error' : 'xpto', 'valid' : [1,2,3]})
    Messages.errorDetailed("Error adding X.","Valid values are", [1,2,3,5])
|
waynezhanghk/FastCausation
|
features.py
|
Python
|
apache-2.0
| 23,932
| 0.013497
|
import numpy as np
from sklearn.base import BaseEstimator
import pandas as pd
import operator
from estimator_base import *
from features_base import *
BINARY = "Binary"
CATEGORICAL = "Categorical"
NUMERICAL = "Numerical"
class FeatureMapper:
def __init__(self, features):
self.features = features
def fit(self, X, y=None):
for feature_name in self.features:
extractor.fit(X[feature_name].values[:,np.newaxis], y)
def transform(self, X):
return X[self.features].as_matrix()
def fit_transform(self, X, y=None):
return self.transform(X)
class SimpleTransform(BaseEstimator):
def __init__(self, transformer):
self.transformer = transformer
def fit(self, X, y=None):
return self
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X, y=None):
return np.array([self.transformer(x) for x in X], ndmin=2).T
class MultiColumnTransform(BaseEstimator):
def __init__(self, transformer):
self.transformer = transformer
def fit(self, X, y=None):
return self
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X, y=None):
return np.array([self.transformer(*x[1]) for x in X.iterrows()], ndmin=2).T
def get_all_features():
all_features = [
('Max', 'A', SimpleTransform(max)),
('Max', 'B', SimpleTransform(max)),
('Min', 'A', SimpleTransform(min)),
('Min', 'B', SimpleTransform(min)),
('Numerical', 'A type', SimpleTransform(lambda x: int(numerical(x)))),
('Numerical', 'B type', SimpleTransform(lambda x: int(numerical(x)))),
('Sub', ['Numerical[A type]','Numerical[B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Numerical[A type],Numerical[B type]]', SimpleTransform(abs)),
('Number of Samples', 'A', SimpleTransform(len)),
('Log', 'Number of Samples[A]', SimpleTransform(np.log)),
('Number of Unique Samples', 'A', SimpleTransform(count_unique)),
('Number of Unique Samples', 'B', SimpleTransform(count_unique)),
('Max', ['Number of Unique Samples[A]','Number of Unique Samples[B]'], MultiColumnTransform(max)),
('Min', ['Number of Unique Samples[A]','Number of Unique Samples[B]'], MultiColumnTransform(min)),
('Sub', ['Number of Unique Samples[A]','Number of Unique Samples[B]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Number of Unique Samples[A],Number of Unique Samples[B]]', SimpleTransform(abs)),
('Log', 'Number of Unique Samples[A]', SimpleTransform(np.log)),
('Log', 'Number of Unique Samples[B]', SimpleTransform(np.log)),
('Max', ['Log[Number of Unique Samples[A]]','Log[Number of Unique Samples[B]]'], MultiColumnTransform(max)),
('Min', ['Log[Number of Unique Samples[A]]','Log[Number of Unique Samples[B]]'], MultiColumnTransform(min)),
('Sub', ['Log[Number of Unique Samples[A]]','Log[Number of Unique Samples[B]]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Log[Number of Unique Samples[A]],Log[Number of Unique Samples[B]]]', SimpleTransform(abs)),
('Ratio of Unique Samples', 'A', SimpleTransform(count_unique_ratio)),
('Ratio of Unique Samples', 'B', SimpleTransform(count_unique_ratio)),
('Max', ['Ratio of Unique Samples[A]','Ratio of Unique Samples[B]'], MultiColumnTransform(max)),
('Min', ['Ratio of Unique Samples[A]','Ratio of Unique Samples[B]'], MultiColumnTransform(min)),
('Sub', ['Ratio of Unique Samples[A]','Ratio of Unique Samples[B]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Ratio of Unique Samples[A],Ratio of Unique Samples[B]]', SimpleTransform(abs)),
('Normalized Value', ['A','A type'], MultiColumnTransform(normalize)),
('Normalized Value', ['B','B type'], MultiColumnTransform(normalize)),
('Count Value', ['A','A type'], MultiColumnTransform(count_value), ['Normalized Value[A,A type]']),
('Count Value', ['B','B type'], MultiColumnTransform(count_value), ['Normalized Value[B,B type]']),
('DisSeq', ['A','A type'], MultiColumnTransform(discrete_seq)),
('DisSeq', ['B','B type'], MultiColumnTransform(discrete_seq)),
('DisProb', ['A','A type'], MultiColumnTransform(discrete_probability), ['DisSeq[A,A type]']),
('DisProb', ['B','B type'], MultiColumnTransform(discrete_probability), ['DisSeq[B,B type]']),
('Normalized Entropy Baseline', ['A','A type'], MultiColumnTransform(normalized_entropy_baseline), ['Normalized Value[A,A type]']),
('Normalized Entropy Baseline', ['B','B type'], MultiColumnTransform(normalized_entropy_baseline), ['Normalized Value[B,B type]']),
('Max', ['Normalized Entropy Baseline[A,A type]','Normalized Entropy Baseline[B,B type]'], MultiColumnTransform(max)),
('Min', ['Normalized Entropy Baseline[A,A type]','Normalized Entropy Baseline[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Normalized Entropy Baseline[A,A type]','Normalized Entropy Baseline[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Normalized Entropy Baseline[A,A type],Normalized Entropy Baseline[B,B type]]', SimpleTransform(abs)),
('Normalized Entropy', ['A','A type'], MultiColumnTransform(normalized_entropy), ['Count Value[A,A type]']),
('Normalized Entropy', ['B','B type'], MultiColumnTransform(normalized_entropy), ['Count Value[B,B type]']),
('Max', ['Normalized Entropy[A,A type]','Normalized Entropy[B,B type]'], MultiColumnTransform(max)),
('Min', ['Normalized Entropy[A,A type]','Normalized Entropy[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Normalized Entropy[A,A type]','Normalized Entropy[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Normalized Entropy[A,A type],Normalized Entropy[B,B type]]', SimpleTransform(abs)),
('IGCI', ['A','A type','B','B type'], MultiColumnTransform(igci), ['Normalized Value[A,A type]', 'Normalized Value[B,B type]']),
        ('IGCI', ['B','B type','A','A type'], MultiColumnTransform(igci), ['Normalized Value[B,B type]', 'Normalized Value[A,A type]']),
('Sub', ['IGCI[A,A type,B,B type]','IGCI[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
        ('Abs', 'Sub[IGCI[A,A type,B,B type],IGCI[B,B type,A,A type]]', SimpleTransform(abs)),
('Gaussian Divergence', ['A','A type'], MultiColumnTransform(gaussian_divergence), ['Count Value[A,A type]']),
('Gaussian Divergence', ['B','B type'], MultiColumnTransform(gaussian_divergence), ['Count Value[B,B type]']),
('Max', ['Gaussian Divergence[A,A type]','Gaussian Divergence[B,B type]'], MultiColumnTransform(max)),
('Min', ['Gaussian Divergence[A,A type]','Gaussian Divergence[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Gaussian Divergence[A,A type]','Gaussian Divergence[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Gaussian Divergence[A,A type],Gaussian Divergence[B,B type]]', SimpleTransform(abs)),
('Uniform Divergence', ['A','A type'], MultiColumnTransform(uniform_divergence), ['Count Value[A,A type]']),
('Uniform Divergence', ['B','B type'], MultiColumnTransform(uniform_divergence), ['Count Value[B,B type]']),
('Max', ['Uniform Divergence[A,A type]','Uniform Divergence[B,B type]'], MultiColumnTransform(max)),
('Min', ['Uniform Divergence[A,A type]','Uniform Divergence[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Uniform Divergence[A,A type]','Uniform Divergence[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Uniform Divergence[A,A type],Uniform Divergence[B,B type]]', SimpleTransform(abs)),
('Discrete Entropy', ['A','A type'], MultiColumnTransform(discrete_entropy), ['DisProb[A,A type]']),
('Discrete Entropy', ['B','B type'], MultiColumnTransform(discrete_entropy), ['DisProb[B,B type]']),
('Max', ['Discrete Entropy[A,A type]','Discrete Entropy[B,B type]'], MultiColumnTransform(max)),
('Min', ['Discrete Entr
|
dimatura/opendr
|
test_geometry.py
|
Python
|
mit
| 1,656
| 0.008454
|
#!/usr/bin/env python
# encoding: utf-8
"""
Author(s): Matthew Loper
See LICENCE.txt for licensing and contact information.
"""
import sys
import os
import unittest
import chumpy as ch
from chumpy import Ch
import numpy as np
from util_tests import get_earthmesh
class TestGeometry(unittest.TestCase):
def setUp(self):
np.random.seed(0)
def test_rodrigues(self):
from geometry import Rodrigues
        rt = np.random.randn(3)
rt2 = rt + np.random.rand(3)*1e-5
foo1 = Rodrigues(rt = rt)
foo2 = Rodrigues(rt = rt2)
empirical = (foo2.r - foo1.r).flatten()
        predicted = foo1.dr_wrt(foo1.rt).dot(rt2-rt)
self.assertTrue(np.max(np.abs(empirical - predicted)) < 1e-10)
def test_vert_normals(self):
from geometry import VertNormals
import numpy as np
mesh = get_earthmesh(np.zeros(3), np.zeros(3))
v, f = mesh.v*127., mesh.f
vn1 = VertNormals(f=f, v=v)
dr_predicted = vn1.dr_wrt(vn1.v).copy()
eps = .00001 * np.random.randn(v.size).reshape(v.shape)
v += eps
vn2 = VertNormals(v=v, f=f)
empirical_diff = (vn2.r - vn1.r).reshape((-1,3))
predicted_diff = dr_predicted.dot(eps.flatten()).reshape((-1,3))
if False:
print np.max(np.abs(empirical_diff-predicted_diff))
print empirical_diff[:6]
print predicted_diff[:6]
self.assertTrue(np.max(np.abs(empirical_diff-predicted_diff)) < 6e-13)
suite = unittest.TestLoader().loadTestsFromTestCase(TestGeometry)
if __name__ == '__main__':
unittest.main()
|
bmcfee/librosa
|
librosa/onset.py
|
Python
|
isc
| 19,286
| 0.001141
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Onset detection
===============
.. autosummary::
:toctree: generated/
onset_detect
onset_backtrack
onset_strength
onset_strength_multi
"""
import numpy as np
import scipy
from ._cache import cache
from . import core
from . import util
from .util.exceptions import ParameterError
from .feature.spectral import melspectrogram
__all__ = ["onset_detect", "onset_strength", "onset_strength_multi", "onset_backtrack"]
def onset_detect(
y=None,
sr=22050,
onset_envelope=None,
hop_length=512,
backtrack=False,
energy=None,
units="frames",
normalize=True,
**kwargs,
):
"""Locate note onset events by picking peaks in an onset strength envelope.
The `peak_pick` parameters were chosen by large-scale hyper-parameter
optimization over the dataset provided by [#]_.
.. [#] https://github.com/CPJKU/onset_db
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of ``y``
onset_envelope : np.ndarray [shape=(m,)]
(optional) pre-computed onset strength envelope
hop_length : int > 0 [scalar]
hop length (in samples)
units : {'frames', 'samples', 'time'}
The units to encode detected onset events in.
By default, 'frames' are used.
backtrack : bool
If ``True``, detected onset events are backtracked to the nearest
preceding minimum of ``energy``.
This is primarily useful when using onsets as slice points for segmentation.
energy : np.ndarray [shape=(m,)] (optional)
An energy function to use for backtracking detected onset events.
If none is provided, then ``onset_envelope`` is used.
normalize : bool
If ``True`` (default), normalize the onset envelope to have minimum of 0 and
maximum of 1 prior to detection. This is helpful for standardizing the
parameters of `librosa.util.peak_pick`.
Otherwise, the onset envelope is left unnormalized.
kwargs : additional keyword arguments
Additional parameters for peak picking.
See `librosa.util.peak_pick` for details.
Returns
-------
onsets : np.ndarray [shape=(n_onsets,)]
estimated positions of detected onsets, in whichever units
are specified. By default, frame indices.
.. note::
If no onset strength could be detected, onset_detect returns
an empty list.
Raises
------
ParameterError
if neither ``y`` nor ``onsets`` are provided
or if ``units`` is not one of 'frames', 'samples', or 'time'
See Also
--------
onset_strength : compute onset strength per-frame
onset_backtrack : backtracking onset events
librosa.util.peak_pick : pick peaks from a time series
Examples
--------
Get onset times from a signal
>>> y, sr = librosa.load(librosa.ex('trumpet'))
    >>> librosa.onset.onset_detect(y=y, sr=sr, units='time')
    array([0.07 , 0.232, 0.395, 0.604, 0.743, 0.929, 1.045, 1.115,
1.416, 1.672, 1.881, 2.043, 2.206, 2.368, 2.554, 3.019])
Or use a pre-computed onset envelope
>>> o_env = librosa.onset.onset_strength(y, sr=sr)
>>> times = librosa.times_like(o_env, sr=sr)
>>> onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=sr)
>>> import matplotlib.pyplot as plt
>>> D = np.abs(librosa.stft(y))
>>> fig, ax = plt.subplots(nrows=2, sharex=True)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... x_axis='time', y_axis='log', ax=ax[0])
>>> ax[0].set(title='Power spectrogram')
>>> ax[0].label_outer()
>>> ax[1].plot(times, o_env, label='Onset strength')
>>> ax[1].vlines(times[onset_frames], 0, o_env.max(), color='r', alpha=0.9,
... linestyle='--', label='Onsets')
>>> ax[1].legend()
"""
# First, get the frame->beat strength profile if we don't already have one
if onset_envelope is None:
if y is None:
raise ParameterError("y or onset_envelope must be provided")
onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)
# Shift onset envelope up to be non-negative
# (a common normalization step to make the threshold more consistent)
if normalize:
# Normalize onset strength function to [0, 1] range
onset_envelope = onset_envelope - onset_envelope.min()
# Max-scale with safe division
onset_envelope /= np.max(onset_envelope) + util.tiny(onset_envelope)
# Do we have any onsets to grab?
if not onset_envelope.any() or not np.all(np.isfinite(onset_envelope)):
onsets = np.array([], dtype=np.int)
else:
# These parameter settings found by large-scale search
kwargs.setdefault("pre_max", 0.03 * sr // hop_length) # 30ms
kwargs.setdefault("post_max", 0.00 * sr // hop_length + 1) # 0ms
kwargs.setdefault("pre_avg", 0.10 * sr // hop_length) # 100ms
kwargs.setdefault("post_avg", 0.10 * sr // hop_length + 1) # 100ms
kwargs.setdefault("wait", 0.03 * sr // hop_length) # 30ms
kwargs.setdefault("delta", 0.07)
# Peak pick the onset envelope
onsets = util.peak_pick(onset_envelope, **kwargs)
# Optionally backtrack the events
if backtrack:
if energy is None:
energy = onset_envelope
onsets = onset_backtrack(onsets, energy)
if units == "frames":
pass
elif units == "samples":
onsets = core.frames_to_samples(onsets, hop_length=hop_length)
elif units == "time":
onsets = core.frames_to_time(onsets, hop_length=hop_length, sr=sr)
else:
raise ParameterError("Invalid unit type: {}".format(units))
return onsets
def onset_strength(
y=None,
sr=22050,
S=None,
lag=1,
max_size=1,
ref=None,
detrend=False,
center=True,
feature=None,
aggregate=None,
**kwargs,
):
"""Compute a spectral flux onset strength envelope.
Onset strength at time ``t`` is determined by::
mean_f max(0, S[f, t] - ref[f, t - lag])
where ``ref`` is ``S`` after local max filtering along the frequency
axis [#]_.
By default, if a time series ``y`` is provided, S will be the
log-power Mel spectrogram.
.. [#] Böck, Sebastian, and Gerhard Widmer.
"Maximum filter vibrato suppression for onset detection."
16th International Conference on Digital Audio Effects,
Maynooth, Ireland. 2013.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of ``y``
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as ``S``.
If not provided, it will be computed from ``S``.
If provided, it will override any local max filtering governed by ``max_size``.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by ``n_fft // (2 * hop_length)`` frames.
This corresponds to using a centered frame analysis in the short-time Fourier
transform.
feature : function
Function for computing time-series features, eg, scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with ``fmax=11025.0``
aggregate : function
Aggregation function to use when combining onsets
at different frequency bins.
Default: `np.mean`
kwargs : additional keyword arguments
Additional parameters to ``feature()``, if ``S`` is not provided.
Returns
-------
|
bschoenfeld/va-court-scraper
|
courtutils/user.py
|
Python
|
mit
| 990
| 0.00303
|
import hashlib
import os
from database import Database
from email import send_welcome_email
from flask.ext.login import UserMixin
def get_hash(data):
hash = hashlib.sha256()
hash.update(os.environ['PASSWORD_TOKEN_SALT'])
    hash.update(data)
return hash.hexdigest()
class User(UserMixin):
    def __init__(self, user):
self.id = user['email']
self.user = user
@classmethod
def get(cls, id):
return User(Database.get_user(id))
@classmethod
def registered(cls, email):
return Database.get_user(email) != None
@classmethod
def create(cls, email):
Database.add_user(email)
send_welcome_email(email)
@classmethod
def update_password(cls, email, password):
Database.set_user_password(email, get_hash(password))
@classmethod
def login(cls, email, password):
user = Database.confirm_credentials(email, get_hash(password))
return None if user is None else User(user)
|
ddboline/Garmin-Forerunner-610-Extractor_fork
|
ant/base/ant.py
|
Python
|
mit
| 12,100
| 0.003884
|
# Ant
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import array
import collections
import struct
import threading
import time
import Queue
import logging
import usb
from message import Message
from commons import format_list
from driver import find_driver
_logger = logging.getLogger("garmin.ant.base.ant")
class Ant():
_RESET_WAIT = 1
def __init__(self):
self._driver = find_driver()
self._message_queue_cond = threading.Condition()
self._message_queue = collections.deque()
self._events = Queue.Queue()
self._buffer = array.array('B', [])
self._burst_data = array.array('B', [])
self._last_data = array.array('B', [])
self._running = True
self._driver.open()
self._worker_thread = threading.Thread(target=self._worker, name="ant.base")
self._worker_thread.start()
self.reset_system()
def start(self):
self._main()
def stop(self):
if self._running:
_logger.debug("Stoping ant.base")
self._running = False
self._worker_thread.join()
def _on_broadcast(self, message):
self._events.put(('event', (message._data[0],
Message.Code.EVENT_RX_BROADCAST, message._data[1:])))
def _on_acknowledge(self, message):
self._events.put(('event', (message._data[0],
Message.Code.EVENT_RX_ACKNOWLEDGED, message._data[1:])))
def _on_burst_data(self, message):
sequence = message._data[0] >> 5
channel = message._data[0] & 0b00011111
data = message._data[1:]
# First sequence
if sequence == 0:
self._burst_data = data
# Other
else:
self._burst_data.extend(data)
# Last sequence (indicated by bit 3)
if sequence & 0b100 != 0:
self._events.put(('event', (channel,
Message.Code.EVENT_RX_BURST_PACKET, self._burst_data)))
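    # Worked example (illustrative, not part of the original source): if the
    # first byte of a burst message is 0xA3 == 0b10100011, then
    # sequence = 0xA3 >> 5 == 0b101 (bit 3 is set, so this is the last packet
    # of the burst) and channel = 0xA3 & 0b00011111 == 3, per the decoding above.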
def _worker(self):
_logger.debug("Ant runner started")
while self._running:
try:
message = self.read_message()
if message == None:
break
# TODO: flag and extended for broadcast, acknowledge, and burst
# Only do callbacks for new data. Resent data only indicates
# a new channel timeslot.
if not (message._id == Message.ID.BROADCAST_DATA and
message._data == self._last_data):
# Notifications
if message._id in [Message.ID.STARTUP_MESSAGE, \
Message.ID.SERIAL_ERROR_MESSAGE]:
self._events.put(('response', (None, message._id,
message._data)))
# Response (no channel)
elif message._id in [Message.ID.RESPONSE_VERSION, \
Message.ID.RESPONSE_CAPABILITIES, \
Message.ID.RESPONSE_SERIAL_NUMBER]:
self._events.put(('response', (None, message._id,
message._data)))
# Response (channel)
elif message._id in [Message.ID.RESPONSE_CHANNEL_STATUS, \
Message.ID.RESPONSE_CHANNEL_ID]:
self._events.put(('response', (message._data[0],
message._id, message._data[1:])))
# Response (other)
elif (message._id == Message.ID.RESPONSE_CHANNEL \
and message._data[1] != 0x01):
self._events.put(('response', (message._data[0],
message._data[1], message._data[2:])))
# Channel event
elif message._id == Message.ID.BROADCAST_DATA:
self._on_broadcast(message)
elif message._id == Message.ID.ACKNOWLEDGE_DATA:
self._on_acknowledge(message)
elif message._id == Message.ID.BURST_TRANSFER_DATA:
self._on_burst_data(message)
elif message._id == Message.ID.RESPONSE_CHANNEL:
_logger.debug("Got channel event, %r", message)
self._events.put(('event', (message._data[0],
message._data[1], message._data[2:])))
else:
_logger.warning("Got unknown message, %r", message)
else:
_logger.debug("No new data this period")
# Send messages in queue, on indicated time slot
if message._id == Message.ID.BROADCAST_DATA:
time.sleep(0.1)
_logger.debug("Got broadcast data, examine queue to see if we should send anything back")
if self._message_queue_cond.acquire(blocking=False):
while len(self._message_queue) > 0:
m = self._message_queue.popleft()
self.write_message(m)
                            _logger.debug(" - sent message from queue, %r", m)
if(m._id != Message.ID.BURST_TRANSFER_DATA or \
m._data[0] & 0b10000000):# or m._data[0] == 0):
break
else:
_logger.debug(" - no messages in queue")
self._message_queue_cond.release()
self._last_data = message._data
except usb.USBError as e:
_logger.warning("%s, %r", type(e), e.args)
_logger.debug("Ant runner stopped")
def _main(self):
while self._running:
try:
(event_type, event) = self._events.get(True, 1.0)
self._events.task_done()
(channel, event, data) = event
if event_type == 'response':
self.response_function(channel, event, data)
elif event_type == 'event':
self.channel_event_function(channel, event, data)
else:
_logger.warning("Unknown message typ '%s': %r", event_type, event)
except Queue.Empty as e:
pass
def write_message_timeslot(self, message):
with self._message_queue_cond:
self._message_queue.append(message)
def write_message(self, message):
data = message.get()
self._driver.write(data)
_logger.debug("Write data: %s", format_list(data))
def read_message(self):
while self._running:
# If we have a message in buffer already, return it
if len(self._buffer) >= 5 and len(self._buffer) >= self._buffer[1] + 4:
packet = self._buffer[:self._buffer[1] + 4]
|
bsmr-eve/Pyfa
|
eos/effects/usemissiles.py
|
Python
|
gpl-3.0
| 1,203
| 0.002494
|
# useMissiles
#
# Used by:
# Modules from group: Missile Launcher Heavy (12 of 12)
# Modules from group: Missile Launcher Rocket (15 of 15)
# Modules named like: Launcher (154 of 154)
# Structure Modules named like: Standup Launcher (7 of 7)
type = 'active', "projected"
def handler(fit, src, context):
# Set reload time to 10 seconds
src.reloadTime = 10000
if "projected" in context:
if src.item.group.name == 'Missile Launcher Bomb':
# Bomb Launcher Cooldown Timer
moduleReactivationDelay = src.getModifiedItemAttr("moduleReactivationDelay")
speed = src.getModifiedItemAttr("speed")
# Void and Focused Void Bombs
neutAmount = src.getModifiedChargeAttr("energyNeutralizerAmount")
if moduleReactivationDelay and neutAmount and speed:
                fit.addDrain(src, speed + moduleReactivationDelay, neutAmount, 0)
# Lockbreaker Bombs
            ecmStrengthBonus = src.getModifiedChargeAttr("scan{0}StrengthBonus".format(fit.scanType))
if ecmStrengthBonus:
strModifier = 1 - ecmStrengthBonus / fit.scanStrength
fit.ecmProjectedStr *= strModifier
|
joebowen/ChannelWormDjango
|
ChannelWorm/channelworm/settings.py
|
Python
|
mit
| 3,520
| 0
|
"""
Django settings for channel_worm project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd0vy02-g#nq@lg!s%5v$w(jilj@af791#1-3k9y7ea3c)djj!w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'digitizer',
'ion_channel'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'channelworm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates', )],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'channelworm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# Pycharm detected this
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates').replace('\\', '/'),
)
# TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(
os.path.dirname(__file__),
'static',
),
)
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
MEDIA_URL = '/media/'
|
relman/sevpn-mgmt-py
|
SevpnMgmtPy/admin_api/traffic.py
|
Python
|
mit
| 1,143
| 0.000875
|
# -*- coding: utf-8 -*-
class TrafficEntry:
def __init__(self, broadcast_count=0L, broadcast_bytes=0L, unicast_count=0L, unicast_bytes=0L):
self.broadcast_count = broadcast_count
self.broadcast_bytes = broadcast_bytes
self.unicast_count = unicast_count
self.unicast_bytes = unicast_bytes
class Traffic:
def __init__(self, send=TrafficEntry(), recv=TrafficEntry()):
self.send = send
self.recv = recv
def in_rpc_traffic(self, pack):
if pack is None:
return
        self.recv.broadcast_bytes = pack.get_value("Recv.BroadcastBytes")
        self.recv.broadcast_count = pack.get_value("Recv.BroadcastCount")
self.recv.unicast_bytes = pack.get_value("Recv.UnicastBytes")
self.recv.unicast_count = pack.get_value("Recv.UnicastCount")
self.send.broadcast_bytes = pack.get_value("Send.BroadcastBytes")
self.send.broadcast_count = pack.get_value("Send.BroadcastCount")
self.send.unicast_bytes = pack.get_value("Send.UnicastBytes")
self.send.unicast_count = pack.get_value("Send.UnicastCount")
|
dynaryu/inasafe
|
safe/storage/test/test_raster.py
|
Python
|
gpl-3.0
| 2,520
| 0.002778
|
# coding=utf-8
"""**Tests for safe raster layer class**
contains tests for QGIS specific methods.
See test_io.py also
"""
__author__ = 'Dmitry Kolesov <kolesov.dm@gmail.com>'
__revision__ = '$Format:%H$'
__date__ = '28/12/2013'
__license__ = "GPL"
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import os
import logging
import unittest
from qgis.core import QgsRasterLayer
from safe.storage.utilities import read_keywords
from safe.storage.raster import Raster
from safe.test.utilities import test_data_path, get_qgis_app
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
LOGGER = logging.getLogger('InaSAFE')
KEYWORD_PATH = test_data_path('hazard', 'jakarta_flood_design.xml')
RASTER_BASE = test_data_path('hazard', 'jakarta_flood_design')
class RasterTest(unittest.TestCase):
def setUp(self):
msg = 'Keyword file does not exist at %s' % KEYWORD_PATH
assert os.path.exists(KEYWORD_PATH), msg
    def test_qgis_raster_layer_loading(self):
"""Test that reading from QgsRasterLayer works."""
# This line is the cause of the problem:
qgis_layer = QgsRasterLayer(RASTER_BASE + '.tif', 'test')
layer = Raster(data=qgis_layer)
qgis_extent = qgis_layer.dataProvider().extent()
qgis_extent = [qgis_extent.xMinimum(), qgis_extent.yMinimum(),
qgis_extent.xMaximum(), qgis_extent.yMaximum()]
layer_exent = layer.get_bounding_box()
self.assertListEqual(
layer_exent, qgis_extent,
'Expected %s extent, got %s' % (qgis_extent, layer_exent))
def test_convert_to_qgis_raster_layer(self):
"""Test that converting to QgsVectorLayer works."""
# Create vector layer
keywords = read_keywords(RASTER_BASE + '.keywords')
layer = Raster(data=RASTER_BASE + '.tif', keywords=keywords)
# Convert to QgsRasterLayer
qgis_layer = layer.as_qgis_native()
qgis_extent = qgis_layer.dataProvider().extent()
qgis_extent = [qgis_extent.xMinimum(), qgis_extent.yMinimum(),
qgis_extent.xMaximum(), qgis_extent.yMaximum()]
layer_exent = layer.get_bounding_box()
self.assertListEqual(
layer_exent, qgis_extent,
'Expected %s extent, got %s' % (qgis_extent, layer_exent))
if __name__ == '__main__':
suite = unittest.makeSuite(RasterTest, 'test')
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
ivanamihalek/progesterone
|
16_UCSC_sources_to_ENCODE.py
|
Python
|
gpl-2.0
| 3,087
| 0.024619
|
#!/usr/bin/python3
#
# This file is part of Progesterone pipeline.
#
# Progesterone pipeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Progesterone pipeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Progesterone pipeline. If not, see <https://www.gnu.org/licenses/>.
#
from utils.mysqldb import *
import os
# UCSC does not have the pointers directly back to ENCODE, so I found them for ESR1 - by hand
# encode_esr1_xps.tsv must contain 3 columns: UCSC id, encode experiment id, and encode file id
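# For illustration only (the identifiers below are made up, not real accessions),
# one tab-separated row of encode_esr1_xps.tsv would look like:
#   wgEncodeEH000123<TAB>ENCSR000AAA<TAB>ENCFF000AAA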
#########################################
def main():
conf_file = "/home/ivana/.mysql_conf"
mapping_file = "encode_esr1_xps.tsv"
for dependency in [conf_file, mapping_file]:
if not os.path.exists(dependency):
print(dependency,"not found")
exit()
encode_exp_id = {}
encode_file_id = {}
ucsc_ids = []
	with open(mapping_file,"r") as inf:
for line in inf:
if 'UCSC' in line: continue # header
[ucsc, encode_exp, encode_file] = line.split("\t")[:3]
ucsc_ids.append(ucsc)
encode_exp_id[ucsc] = encode_exp
encode_file_id[ucsc] = encode_file
#########################
# plug in to local database
db = connect_to_mysql(conf_file)
cursor = db.cursor()
search_db(cursor,"set autocommit=1")
switch_to_db(cursor,'progesterone')
	# this might not be the best idea if the database grows really large
# first make sure we have single entry for each of multiple ids
for line in search_db(cursor,"select id, external_id from xrefs where xtype='ucsc'"):
[xref_id, ucsc_str] = line
ucsc_ids_stored = ucsc_str.split(",")
if len(ucsc_ids_stored) <2: continue
for ucsc_id in ucsc_ids_stored:
store_or_update(cursor, 'xrefs', {'xtype':'ucsc', 'external_id':ucsc_id}, None)
# now for each single entry, make parent point to encode file, and encode file's parent to encode exp
for line in search_db(cursor,"select id, external_id from xrefs where xtype='ucsc' and external_id not like '%,%'"):
[ucsc_xref_id, ucsc_id] = line
if not ucsc_id in ucsc_ids: continue
encode_file_xref_id = store_or_update(cursor, 'xrefs', {'xtype':'encode', 'external_id': encode_file_id[ucsc_id]}, None)
search_db(cursor, "update xrefs set parent_id=%d where id=%d" % (encode_file_xref_id, ucsc_xref_id))
encode_exp_xref_id = store_or_update(cursor, 'xrefs', {'xtype':'encode', 'external_id': encode_exp_id[ucsc_id]}, None)
search_db(cursor, "update xrefs set parent_id=%d where id=%d" % (encode_exp_xref_id, encode_file_xref_id))
cursor.close()
db.close()
return True
#########################################
########################################
if __name__ == '__main__':
main()
|
skeuomorf/Binge
|
binge/core/views.py
|
Python
|
mit
| 1,811
| 0.002209
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from .models import Element, Bucket
from .serializers import ElementSerializer
def homepage(request):
if request.user.is_authenticated():
to_watch_slug = slugify("{} {}".format(request.user.username, 'to_watch'))
to_watch = Bucket.objects.get(slug=to_watch_slug)
watched_slug = slugify("{} {}".format(request.user.username, 'watched'))
watched = Bucket.objects.get(slug=watched_slug)
to_watch_elements = to_watch.element_set.all()
watched_elements = watched.element_set.all()
context = {'to_watch': to_watch_elements, 'watched': watched_elements}
else:
context = {}
return render(request, 'core/lists.html', context)
@api_view(['POST'])
def add_element(request):
if request.method == 'POST':
data = JSONParser().parse(request)
        bucket_slug = slugify("{} {}".format(request.user.username, data['bucket']))
        bucket = Bucket.objects.get(slug=bucket_slug)
data['bucket'] = bucket.id
try:
inst = Element.objects.get(name=data['name'], trakt_id=data['trakt_id'])
serializer = ElementSerializer(inst, data=data)
except ObjectDoesNotExist:
serializer = ElementSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
crossgovernmentservices/csdigital-prototype
|
application/frontend/forms.py
|
Python
|
mit
| 402
| 0
|
from flask.ext.wtf import Form
from flask.ext.wtf.html5 import EmailField
from wtforms.validators import Required
from wtforms.fields import (
TextAreaField,
HiddenField
)
class LoginForm(Form):
email = EmailField('Email address', validators=[Required()])
next = HiddenField('next')
class FeedbackForm(Form):
feedback = TextAreaField('Your feedback', validators=[Required()])
|
nanocell/lsync
|
python/boto/route53/connection.py
|
Python
|
gpl-3.0
| 13,538
| 0.000222
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import xml.sax
import time
import uuid
import urllib
import boto
from boto.connection import AWSAuthConnection
from boto import handler
from boto.resultset import ResultSet
import boto.jsonresponse
import exception
import hostedzone
HZXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
#boto.set_stream_logger('dns')
class Route53Connection(AWSAuthConnection):
DefaultHost = 'route53.amazonaws.com'
"""The default Route53 API endpoint to connect to."""
Version = '2012-02-29'
"""Route53 API version."""
XMLNameSpace = 'https://route53.amazonaws.com/doc/2012-02-29/'
"""XML schema for this Route53 API version."""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0, security_token=None,
validate_certs=True):
AWSAuthConnection.__init__(self, host,
aws_access_key_id, aws_secret_access_key,
True, port, proxy, proxy_port, debug=debug,
security_token=security_token,
validate_certs=validate_certs)
def _required_auth_capability(self):
return ['route53']
def make_request(self, action, path, headers=None, data='', params=None):
if params:
pairs = []
for key, val in params.iteritems():
if val is None:
continue
pairs.append(key + '=' + urllib.quote(str(val)))
path += '?' + '&'.join(pairs)
return AWSAuthConnection.make_request(self, action, path,
headers, data)
# Hosted Zones
def get_all_hosted_zones(self, start_marker=None, zone_list=None):
"""
Returns a Python data structure with information about all
Hosted Zones defined for the AWS account.
:param int start_marker: start marker to pass when fetching additional
results after a truncated list
:param list zone_list: a HostedZones list to prepend to results
"""
params = {}
if start_marker:
params = {'marker': start_marker}
response = self.make_request('GET', '/%s/hostedzone' % self.Version,
params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='HostedZones',
item_marker=('HostedZone',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
if zone_list:
e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
while 'NextMarker' in e['ListHostedZonesResponse']:
next_marker = e['ListHostedZonesResponse']['NextMarker']
zone_list = e['ListHostedZonesResponse']['HostedZones']
e = self.get_all_hosted_zones(next_marker, zone_list)
return e
def get_hosted_zone(self, hosted_zone_id):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
"""
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_hosted_zone_by_name(self, hosted_zone_name):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_name: str
:param hosted_zone_name: The fully qualified domain name for the Hosted
Zone
"""
if hosted_zone_name[-1] != '.':
hosted_zone_name += '.'
all_hosted_zones = self.get_all_hosted_zones()
for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
#check that they gave us the FQDN for their zone
if zone['Name'] == hosted_zone_name:
return self.get_hosted_zone(zone['Id'].split('/')[-1])
def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
"""
Create a new Hosted Zone. Returns a Python data structure with
information about the newly created Hosted Zone.
:type domain_name: str
:param domain_name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication. If you omit the final period,
Amazon Route 53 assumes the domain is relative to the root.
This is the name you have registered with your DNS registrar.
It is also the name you will delegate from your registrar to
the Amazon Route 53 delegation servers returned in
response to this request.
:type caller_ref: str
:param caller_ref: A unique string that identifies the request
    and that allows failed CreateHostedZone requests to be retried
without the risk of executing the operation twice. If you don't
provide a value for this, boto will generate a Type 4 UUID and
use that.
:type comment: str
:param comment: Any comments you want to include about the hosted
zone.
"""
if caller_ref is None:
caller_ref = str(uuid.uuid4())
params = {'name': domain_name,
'caller_ref': caller_ref,
'comment': comment,
'xmlns': self.XMLNameSpace}
xml = HZXML % params
uri = '/%s/hostedzone' % self.Version
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'}, xml)
body = response.read()
boto.log.debug(body)
if response.status == 201:
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
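# A minimal usage sketch, not part of the original boto module: it assumes
# AWS credentials are already configured for boto, and the domain name and
# the response keys shown below are illustrative placeholders.
#
#     conn = Route53Connection()
#     zone = conn.create_hosted_zone('example.com.', comment='demo zone')
#     print zone['CreateHostedZoneResponse']['HostedZone']['Id']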
|
sdoran35/hate-to-hugs
|
venv/lib/python3.6/site-packages/nltk/translate/ibm2.py
|
Python
|
mit
| 12,271
| 0.000815
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: IBM Model 2
#
# Copyright (C) 2001-2013 NLTK Project
# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Lexical translation model that considers word order.
IBM Model 2 improves on Model 1 by accounting for word order.
An alignment probability is introduced, a(i | j,l,m), which predicts
a source word position, given its aligned target word's position.
The EM algorithm used in Model 2 is:
E step - In the training data, collect counts, weighted by prior
probabilities.
(a) count how many times a source language word is translated
into a target language word
(b) count how many times a particular position in the source
sentence is aligned to a particular position in the target
sentence
M step - Estimate new probabilities based on the counts from the E step
Notations:
i: Position in the source sentence
Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
j: Position in the target sentence
Valid values are 1, 2, ..., length of target sentence
l: Number of words in the source sentence, excluding NULL
m: Number of words in the target sentence
s: A word in the source language
t: A word in the target language
References:
Philipp Koehn. 2010. Statistical Machine Translation.
Cambridge University Press, New York.
Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
Robert L. Mercer. 1993. The Mathematics of Statistical Machine
Translation: Parameter Estimation. Computational Linguistics, 19 (2),
263-311.
"""
from __future__ import division
from collections import defaultdict
from nltk.translate import AlignedSent
from nltk.translate import Alignment
from nltk.translate import IBMModel
from nltk.translate import IBMModel1
from nltk.translate.ibm_model import Counts
import warnings
class IBMModel2(IBMModel):
"""
Lexical translation model that considers word order
>>> bitext = []
>>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big']))
>>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
>>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
>>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))
>>> ibm2 = IBMModel2(bitext, 5)
>>> print(round(ibm2.translation_table['buch']['book'], 3))
1.0
>>> print(round(ibm2.translation_table['das']['book'], 3))
0.0
>>> print(round(ibm2.translation_table['buch'][None], 3))
0.0
>>> print(round(ibm2.translation_table['ja'][None], 3))
0.0
>>> print(ibm2.alignment_table[1][1][2][2])
0.938...
>>> print(round(ibm2.alignment_table[1][2][2][2], 3))
0.0
>>> print(round(ibm2.alignment_table[2][2][4][5], 3))
1.0
>>> test_sentence = bitext[2]
>>> test_sentence.words
['das', 'buch', 'ist', 'ja', 'klein']
>>> test_sentence.mots
['the', 'book', 'is', 'small']
>>> test_sentence.alignment
Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)])
"""
def __init__(self, sentence_aligned_corpus, iterations,
probability_tables=None):
"""
Train on ``sentence_aligned_corpus`` and create a lexical
translation model and an alignment model.
Translation direction is from ``AlignedSent.mots`` to
``AlignedSent.words``.
:param sentence_aligned_corpus: Sentence-aligned parallel corpus
:type sentence_aligned_corpus: list(AlignedSent)
:param iterations: Number of iterations to run training algorithm
:type iterations: int
:param probability_tables: Optional. Use this to pass in custom
probability values. If not specified, probabilities will be
set to a uniform distribution, or some other sensible value.
If specified, all the following entries must be present:
``translation_table``, ``alignment_table``.
See ``IBMModel`` for the type and purpose of these tables.
:type probability_tables: dict[str]: object
"""
super(IBMModel2, self).__init__(sentence_aligned_corpus)
if probability_tables is None:
# Get translation probabilities from IBM Model 1
# Run more iterations of training for Model 1, since it is
# faster than Model 2
ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations)
self.translation_table = ibm1.translation_table
self.set_uniform_probabilities(sentence_aligned_corpus)
else:
# Set user-defined probabilities
self.translation_table = probability_tables['translation_table']
self.alignment_table = probability_tables['alignment_table']
for n in range(0, iterations):
self.train(sentence_aligned_corpus)
self.__align_all(sentence_aligned_corpus)
def set_uniform_probabilities(self, sentence_aligned_corpus):
# a(i | j,l,m) = 1 / (l+1) for all i, j, l, m
l_m_combinations = set()
for aligned_sentence in sentence_aligned_corpus:
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
if (l, m) not in l_m_combinations:
l_m_combinations.add((l, m))
initial_prob = 1 / (l + 1)
if initial_prob < IBMModel.MIN_PROB:
warnings.warn("A source sentence is too long (" + str(l) +
" words). Results may be less accurate.")
for i in range(0, l + 1):
for j in range(1, m + 1):
self.alignment_table[i][j][l][m] = initial_prob
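# Worked example of the uniform initialisation above (illustrative, not part
# of the original module): for a source sentence of l = 4 words, every
# alignment probability a(i | j, l, m) starts at 1 / (4 + 1) = 0.2, for each
# source position i in 0..4 and each target position j in 1..m.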
def train(self, parallel_corpus):
counts = Model2Counts()
for aligned_sentence in parallel_corpus:
src_sentence = [None] + aligned_sentence.mots
trg_sentence = ['UNUSED'] + aligned_sentence.words # 1-indexed
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
# E step (a): Compute normalization factors to weigh counts
total_count = self.prob_all_alignments(src_sentence, trg_sentence)
# E step (b): Collect counts
for j in range(1, m + 1):
t = trg_sentence[j]
for i in range(0, l + 1):
s = src_sentence[i]
count = self.prob_alignment_point(
i, j, src_sentence, trg_sentence)
normalized_count = count / total_count[t]
counts.update_lexical_translation(normalized_count, s, t)
counts.update_alignment(normalized_count, i, j, l, m)
# M step: Update probabilities with maximum likelihood estimates
self.maximize_lexical_translation_probabilities(counts)
self.maximize_alignment_probabilities(counts)
def maximize_alignment_probabilities(self, counts):
MIN_PROB = IBMModel.MIN_PROB
for i, j_s in counts.alignment.items():
for j, src_sentence_lengths in j_s.items():
for l, trg_sentence_lengths in src_sentence_lengths.items():
for m in trg_sentence_lengths:
estimate = (counts.alignment[i][j][l][m] /
counts.alignment_for_any_i[j][l][m])
self.alignment_table[i][j][l][m] = max(estimate,
MIN_PROB)
def prob_all_alignments(self, src_sentence, trg_sentence):
"""
Computes the probability of all possible word alignments,
expressed as a marginal distribution over target words t
Each entry in the return value represents the contribution to
the total alignment probability by the target word
|
nesterione/problem-solving-and-algorithms
|
problems/Empireofcode/IndexPower.py
|
Python
|
apache-2.0
| 441
| 0.018141
|
def index_power(array, n):
if n>=len(array):
return -1
else:
return array[n]**n
if __name__ == '__main__':
# These "asserts" are used only for self-checking and are not necessary for auto-testing
assert index_power([1, 2, 3, 4], 2) == 9, "Square"
assert index_power([1, 3, 10, 100], 3) == 1000000, "Cube"
assert index_power([0, 1], 0) == 1, "Zero power"
assert index_power([1, 2], 3) == -1, "IndexError"
|
liyongyue/dnsspider
|
dns/opcode.py
|
Python
|
isc
| 2,603
| 0.005763
|
# Copyright (C) 2001-2005 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Opcodes."""
import dns.exception
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5
_by_text = {
'QUERY' : QUERY,
'IQUERY' : IQUERY,
'STATUS' : STATUS,
'NOTIFY' : NOTIFY,
'UPDATE' : UPDATE
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
class UnknownOpcode(dns.exception.DNSException):
"""Raised if an opcode is unknown."""
pass
def from_text(text):
"""Convert text into an opcode.
@param text: the textual opcode
@type text: string
@raises UnknownOpcode: the opcode is unknown
@rtype: int
"""
if text.isdigit():
value = int(text)
if value >= 0 and value <= 15:
return value
value = _by_text.get(text.upper())
if value is None:
raise UnknownOpcode
return value
def from_flags(flags):
"""Extract an opcode from DNS message flags.
@param flags: int
@rtype: int
"""
return (flags & 0x7800) >> 11
def to_flags(value):
"""Convert an opcode to a value suitable for ORing into DNS message
flags.
@rtype: int
"""
return (value << 11) & 0x7800
def to_text(value):
"""Convert an opcode to text.
@param value: the opcode
@type value: int
@raises UnknownOpcode: the opcode is unknown
@rtype: string
"""
text = _by_value.get(value)
if text is None:
text = str(value)
return text
def is_update(flags):
"""True if the opcode in flags is UPDATE.
@param flags: DNS flags
@type flags: int
@rtype: bool
"""
if (from_flags(flags) == UPDATE):
return True
return False
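# Illustrative round trip, not part of the original module: the opcode sits
# in bits 11-14 of the DNS flags word, so UPDATE (5) shifts to 0x2800.
#
#     assert to_flags(UPDATE) == 0x2800
#     assert from_flags(0x2800) == UPDATE
#     assert to_text(NOTIFY) == 'NOTIFY'
#     assert from_text('iquery') == IQUERY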
|
ptdtan/Ragout
|
lib/newick/treetest.py
|
Python
|
gpl-3.0
| 4,858
| 0.009881
|
import unittest
import lexer
import parser
from tree import *
from tree import _TreeBuilder
class BuilderTest(unittest.TestCase):
''' Test of the _TreeBuilder (and Leaf and Tree) class. '''
def testTreeBuilding(self):
''' Test that the tree builder constructs trees correctly when
parsed. '''
l = lexer.Lexer("(('foo' : 0.1, 'bar' : 1.0) : 2, baz)")
handler = _TreeBuilder()
p = parser._Parser(l,handler)
p.parse()
t = handler.get_result()
self.assertEqual(len(t.get_edges()),2)
(t1,b1,l1), (t2,b2,l2) = t.get_edges()
self.assertEqual(len(t1.get_edges()),2)
self.assertEqual(l1, 2.0)
self.assertEqual(t2.__class__, Leaf)
self.assertEqual(l2, None)
self.assertEqual(t.leaves_identifiers, ['foo','bar','baz'])
class TestParseTree(unittest.TestCase):
''' Test of the parse_tree() function. '''
def testTreeStructure(self):
''' Test that a parsed tree has the right structure. '''
t = parse_tree("(('foo' : 0.1, 'bar' : 1.0) : 2, baz)")
self.assertEqual(len(t.get_edges()),2)
(t1,b1,l1), (t2,b2,l2) = t.get_edges()
self.assertEqual(len(t1.get_edges()),2)
self.assertEqual(l1, 2.0)
self.assertEqual(t2.__class__, Leaf)
self.assertEqual(l2, None)
self.assertEqual(t.leaves_identifiers, ['foo','bar','baz'])
def testSpecialCases(self):
''' Test that we can parse some special cases of trees. '''
tree = parse_tree("(B,(A,C,E),D);")
self.assertEqual(tree.leaves_identifiers,['B','A','C','E','D'])
tree = parse_tree("(,(,,),);")
self.assertEqual(tree.leaves_identifiers,['']*5)
# underscores are considered empty leaf names!
tree = parse_tree("(_,(_,_,_),_);")
self.assertEqual(tree.leaves_identifiers,['']*5)
# the rest is just checking that we do not crash on this input...
parse_tree("""
(
('Chimp':0.052625,
'Human':0.042375):0.007875,
'Gorilla':0.060125,
('Gibbon':0.124833,
'Orangutan':0.0971667):0.038875
);
""")
parse_tree("""
(
('Chimp':0.052625,
'Human':0.042375) 0.71 : 0.007875,
'Gorilla':0.060125,
('Gibbon':0.124833,
'Orangutan':0.0971667) 1.00 :0.038875
);
""")
class TreeTest(unittest.TestCase):
''' Test of the Tree (and Leaf and _TreeBuilder) class. '''
def testProperties(self):
''' Test that the tree properties lets us extract the right
information. '''
t = parse_tree('((A,B),C);')
self.assertEqual(t.leaves_identifiers, ['A','B','C'])
self.assertNotEqual(t.leaves, ['A','B','C'])
self.assertEqual(len(t.edges), 2)
(n1,_,_), (n2,_,_) = t.edges
self.assertEqual(type(n1), Tree)
self.assertEqual(type(n2), Leaf)
self.assertEqual(n2.identifier, 'C')
class TestFunctions(unittest.TestCase):
''' Test of the module-level functions. '''
def testAddParentLink(self):
''' Test the add_parent_links() function. '''
t = parse_tree('((A,B),C);')
add_parent_links(t)
self.assertEqual([str(l.parent) for l in t.leaves],
["('A', 'B')", "('A', 'B')", "(('A', 'B'), 'C')"])
def testLabel(self):
''' Test if trees with labels are parsed correctly. '''
t = parse_tree("(('A', 'B')label, 'C')")
self.assertEqual(str(t), "(('A', 'B')label, 'C')")
t = parse_tree("(('A', 'B')label, 'C')treelabel")
self.assertEqual(t.identifier, "treelabel")
t = parse_tree("(('A', 'B')label, 'C')1")
self.assertEqual(t.identifier, "1")
def testAddDistanceFromRoot(self):
''' Test the add_distance_from_root() function. '''
t = parse_tree('((A,B),C);')
add_distance_from_root(t)
self.assertEqual([l.distance_from_root for l in t.leaves],[0,0,0])
t = parse_tree('((A:2,B:3):1,C:6);')
add_distance_from_root(t)
self.assertEqual([l.distance_from_root for l in t.leaves],[3,4,6])
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(BuilderTest))
test_suite.addTest(unittest.makeSuite(TestParseTree))
test_suite.addTest(unittest.makeSuite(TreeTest))
test_suite.addTest(unittest.makeSuite(TestFunctions))
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(test_suite)
# from tree import TreeVisitor
# def relabel(tree):
# "Relabel the tree's leaves."
# # visitor pattern.
# class V(TreeVisitor):
# def __init__(self):
# self.count = 0
# def visit_leaf(self,leaf):
# leaf.identifier = str(self.count)
# self.count += 1
# # let visitor traverse tree
# tree.dfs_traverse(V())
#
# relabel(t)
# print t
|
Reactive-Extensions/RxPy
|
tests/test_observable_creation.py
|
Python
|
apache-2.0
| 34,022
| 0.006937
|
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
class BooleanDisposable(object):
def __init__(self):
self.is_disposed = False
def dispose(self):
self.is_disposed = True
return self.is_disposed
def test_return_basic():
scheduler = TestScheduler()
def factory():
return Observable.return_value(42, scheduler)
results = scheduler.start(factory)
results.messages.assert_equal(
on_next(201, 42),
on_completed(201))
def test_return_disposed():
scheduler = TestScheduler()
def factory():
return Observable.return_value(42, scheduler)
results = scheduler.start(factory, disposed=200)
results.messages.assert_equal()
def test_return_disposed_after_next():
scheduler = TestScheduler()
d = SerialDisposable()
xs = Observable.return_value(42, scheduler)
results = scheduler.create_observer()
def action(scheduler, state):
def on_next(x):
d.dispose()
results.on_next(x)
def on_error(e):
results.on_error(e)
def on_completed():
results.on_completed()
d.disposable = xs.subscribe(on_next, on_error, on_completed)
return d.disposable
scheduler.schedule_absolute(100, action)
scheduler.start()
results.messages.assert_equal(on_next(101, 42))
def test_return_observer_throws():
scheduler1 = TestScheduler()
xs = Observable.return_value(1, scheduler1)
xs.subscribe(lambda x: _raise('ex'))
try:
scheduler1.start()
except RxException:
pass
scheduler2 = TestScheduler()
ys = Observable.return_value(1, scheduler2)
ys.subscribe(lambda x: x, lambda ex: ex, lambda: _raise('ex'))
try:
scheduler2.start()
except RxException:
pass
def test_never_basic():
scheduler = TestScheduler()
xs = Observable.never()
results = scheduler.create_observer()
xs.subscribe(results)
scheduler.start()
results.messages.assert_equal()
def test_throw_exception_basic():
scheduler = TestScheduler()
ex = 'ex'
def factory():
return Observable.throw_exception(ex, scheduler)
results = scheduler.start(factory)
results.messages.assert_equal(on_error(201, ex))
def test_throw_disposed():
scheduler = TestScheduler()
def factory():
return Observable.throw_exception('ex', scheduler)
results = scheduler.start(factory, disposed=200)
results.messages.assert_equal()
def test_throw_observer_throws():
scheduler = TestScheduler()
xs = Observable.throw_exception('ex', scheduler)
xs.subscribe(lambda x: None, lambda ex: _raise('ex'), lambda: None)
try:
return scheduler.start()
except RxException:
pass
def test_empty_basic():
scheduler = TestScheduler()
def factory():
return Observable.empty(scheduler)
results = scheduler.start(factory)
results.messages.assert_equal(on_completed(201))
def test_empty_disposed():
scheduler = TestScheduler()
def factory():
return Observable.empty(scheduler)
results = scheduler.start(factory, disposed=200)
results.messages.assert_equal()
def test_empty_observer_throw_exception():
scheduler = TestScheduler()
xs = Observable.empty(scheduler)
xs.subscribe(lambda x: None, lambda ex: None, lambda: _raise('ex'))
try:
return scheduler.start()
except RxException:
pass
def test__subscribe_to_enumerable_finite():
enumerable_finite = [1, 2, 3, 4, 5]
scheduler = TestScheduler()
def create():
return Observable.from_array(enumerable_finite, scheduler)
results = scheduler.start(create)
results.messages.assert_equal(
on_next(201, 1),
on_next(202, 2),
on_next(203, 3),
on_next(204, 4),
on_next(205, 5),
on_completed(206)
)
def test_generate_finite():
scheduler = TestScheduler()
def create():
return Observable.generate(0,
lambda x: x <= 3,
lambda x: x + 1,
lambda x: x,
scheduler)
results = scheduler.start(create)
results.messages.assert_equal(
on_next(201, 0),
on_next(202, 1),
on_next(203, 2),
on_next(204, 3),
on_completed(205)
)
def test_generate_throw_condition():
scheduler = TestScheduler()
ex = 'ex'
def create():
return Observable.generate(0,
lambda x: _raise('ex'),
lambda x: x + 1,
lambda x: x,
scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_error(201, ex))
def test_generate_throw_result_selector():
scheduler = TestScheduler()
ex = 'ex'
def create():
return Observable.generate(0,
lambda x: True,
lambda x: x + 1,
lambda x: _raise('ex'),
scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_error(201, ex))
def test_generate_throw_iterate():
scheduler = TestScheduler()
ex = 'ex'
def create():
return Observable.generate(0,
lambda x: True,
lambda x: _raise(ex),
lambda x: x,
scheduler)
results = scheduler.start(create)
results.messages.assert_equal(
on_next(201, 0),
on_error(202, ex)
)
def test_generate_dispose():
scheduler = TestScheduler()
ex = 'ex'
def create():
return Observable.generate(0,
lambda x: True,
lambda x: x + 1,
lambda x: x,
scheduler)
results = scheduler.start(create, disposed=203)
results.messages.assert_equal(
on_next(201, 0),
on_next(202, 1))
def test_defer_complete():
xs = None
invoked = 0
scheduler = TestScheduler()
def create():
def defer():
nonlocal invoked, xs
invoked += 1
xs = scheduler.create_cold_observable(
on_next(100, scheduler.clock),
on_completed(200)
)
return xs
return Observable.defer(defer)
results = scheduler.start(create)
results.messages.assert_equal(
on_next(300, 200),
on_completed(400)
)
assert(1 == invoked)
return xs.subscriptions.assert_equal(subscribe(200, 400))
def test_defer_error():
scheduler = TestScheduler()
invoked = 0
xs = None
ex = 'ex'
def create():
def defer():
nonlocal invoked, xs
invoked += 1
xs = scheduler.create_cold_observable(on_next(100, scheduler.clock), on_error(200, ex))
return xs
return Observable.defer(defer)
results = scheduler.start(create)
results.messages.assert_equal(on_next(300, 200), on_error(400, ex))
assert (1 == invoked)
return xs.subscriptions.assert_equal(subscribe(200, 400))
def test_defer_dispose():
scheduler = TestScheduler()
invoked = 0
xs = None
def create():
def defer():
nonlocal invoked, xs
invoked
|
allegro/django-powerdns-dnssec
|
powerdns/models/__init__.py
|
Python
|
bsd-2-clause
| 162
| 0
|
from .ownership import * # noqa
from .powerdns import *  # noqa
|
from .requests import * # noqa
from .templates import * # noqa
from .tsigkeys import * # noqa
|
kuke/models
|
legacy/image_classification/googlenet.py
|
Python
|
apache-2.0
| 5,341
| 0
|
import paddle.v2 as paddle
__all__ = ['googlenet']
def inception(name, input, channels, filter1, filter3R, filter3, filter5R,
filter5, proj):
cov1 = paddle.layer.img_conv(
name=name + '_1',
input=input,
filter_size=1,
num_channels=channels,
num_filters=filter1,
stride=1,
padding=0)
cov3r = paddle.layer.img_conv(
name=name + '_3r',
input=input,
filter_size=1,
num_channels=channels,
num_filters=filter3R,
stride=1,
padding=0)
cov3 = paddle.layer.img_conv(
name=name + '_3',
input=cov3r,
filter_size=3,
num_filters=filter3,
stride=1,
padding=1)
cov5r = paddle.layer.img_conv(
name=name + '_5r',
input=input,
filter_size=1,
num_channels=channels,
num_filters=filter5R,
stride=1,
padding=0)
cov5 = paddle.layer.img_conv(
name=name + '_5',
input=cov5r,
filter_size=5,
num_filters=filter5,
stride=1,
padding=2)
pool1 = paddle.layer.img_pool(
name=name + '_max',
input=input,
pool_size=3,
num_channels=channels,
stride=1,
padding=1)
covprj = paddle.layer.img_conv(
name=name + '_proj',
input=pool1,
filter_size=1,
num_filters=proj,
stride=1,
padding=0)
cat = paddle.layer.concat(name=name, input=[cov1, cov3, cov5, covprj])
return cat
def googlenet(input, class_dim):
# stage 1
conv1 = paddle.layer.img_conv(
name="conv1",
input=input,
filter_size=7,
num_channels=3,
num_filters=64,
stride=2,
padding=3)
pool1 = paddle.layer.img_pool(
name="pool1", input=conv1, pool_size=3, num_channels=64, stride=2)
# stage 2
conv2_1 = paddle.layer.img_conv(
name="conv2_1",
input=pool1,
filter_size=1,
num_filters=64,
stride=1,
padding=0)
conv2_2 = paddle.layer.img_conv(
name="conv2_2",
input=conv2_1,
filter_size=3,
num_filters=192,
stride=1,
padding=1)
pool2 = paddle.layer.img_pool(
name="pool2", input=conv2_2, pool_size=3, num_channels=192, stride=2)
# stage 3
ince3a = inception("ince3a", pool2, 192, 64, 96, 128, 16, 32, 32)
ince3b = inception("ince3b", ince3a, 256, 128, 128, 192, 32, 96, 64)
pool3 = paddle.layer.img_pool(
name="pool3", input=ince3b, num_channels=480, pool_size=3, stride=2)
# stage 4
ince4a = inception("ince4a", pool3, 480, 192, 96, 208, 16, 48, 64)
ince4b = inception("ince4b", ince4a, 512, 160, 112, 224, 24, 64, 64)
ince4c = inception("ince4c", ince4b, 512, 128, 128, 256, 24, 64, 64)
ince4d = inception("ince4d", ince4c, 512, 112, 144, 288, 32, 64, 64)
ince4e = inception("ince4e", ince4d, 528, 256, 160, 320, 32, 128, 128)
pool4 = paddle.layer.img_pool(
name="pool4", input=ince4e, num_channels=832, pool_size=3, stride=2)
# stage 5
ince5a = inception("ince5a", pool4, 832, 256, 160, 320, 32, 128, 128)
ince5b = inception("ince5b", ince5a, 832, 384, 192, 384, 48, 128, 128)
pool5 = paddle.layer.img_pool(
name="pool5",
input=ince5b,
num_channels=1024,
pool_size=7,
stride=7,
pool_type=paddle.pooling.Avg())
dropout = paddle.layer.addto(
input=pool5,
layer_attr=paddle.attr.Extra(drop_rate=0.4),
act=paddle.activation.Linear())
out = paddle.layer.fc(input=dropout,
size=class_dim,
act=paddle.activation.Softmax())
# fc for output 1
pool_o1 = paddle.layer.img_pool(
name="pool_o1",
input=ince4a,
num_channels=512,
pool_size=5,
stride=3,
pool_type=paddle.pooling.Avg())
conv_o1 = paddle.layer.img_conv(
name="conv_o1",
input=pool_o1,
filter_size=1,
num_filters=128,
stride=1,
padding=0)
|
fc_o1 = paddle.layer.fc(name="fc_o1",
input=conv_o1,
size=1024,
layer_attr=paddle.attr.Extra(drop_rate=0.7),
act=paddle.activation.Relu())
out1 = paddle.layer.fc(input=fc_o1,
size=class_dim,
act=paddle.activation.Softmax())
# fc for output 2
pool_o2 = paddle.layer.img_pool(
name="pool_o2",
input=ince4d,
num_channels=528,
pool_size=5,
stride=3,
pool_type=paddle.pooling.Avg())
conv_o2 = paddle.layer.img_conv(
name="conv_o2",
input=pool_o2,
filter_size=1,
num_filters=128,
stride=1,
padding=0)
fc_o2 = paddle.layer.fc(name="fc_o2",
input=conv_o2,
size=1024,
layer_attr=paddle.attr.Extra(drop_rate=0.7),
act=paddle.activation.Relu())
out2 = paddle.layer.fc(input=fc_o2,
size=class_dim,
act=paddle.activation.Softmax())
return out, out1, out2
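# Sketch of how the three heads might be wired up, assuming the PaddlePaddle
# v2 data layer API; the layer name, image size and class count below are
# placeholders, not part of the original file.
#
#     image = paddle.layer.data(
#         name="image", type=paddle.data_type.dense_vector(3 * 224 * 224))
#     out, out1, out2 = googlenet(image, class_dim=102)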
|
projectcalico/calico-neutron
|
neutron/agent/metadata/config.py
|
Python
|
apache-2.0
| 3,403
| 0
|
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.common import utils
METADATA_PROXY_HANDLER_OPTS = [
cfg.StrOpt('admin_user',
help=_("Admin user")),
cfg.StrOpt('admin_password',
help=_("Admin password"),
secret=True),
cfg.StrOpt('admin_tenant_name',
help=_("Admin tenant name")),
cfg.StrOpt('auth_url',
help=_("Authentication URL")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('auth_region',
help=_("Authentication region")),
cfg.BoolOpt('auth_insecure',
default=False,
help=_("Turn off verification of the certificate for"
" ssl")),
cfg.StrOpt('auth_ca_cert',
help=_("Certificate Authority public key (CA cert) "
"file
|
for ssl")),
cfg.StrOpt('endpoint_type',
default='adminURL',
help=_("Network service endpoint type to pull from "
"the keystone catalog")),
cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
help=_("IP address used by Nova metadata server
|
.")),
cfg.IntOpt('nova_metadata_port',
default=8775,
help=_("TCP Port used by Nova metadata server.")),
cfg.StrOpt('metadata_proxy_shared_secret',
default='',
help=_('Shared secret to sign instance-id request'),
secret=True),
cfg.StrOpt('nova_metadata_protocol',
default='http',
choices=['http', 'https'],
help=_("Protocol to access nova metadata, http or https")),
cfg.BoolOpt('nova_metadata_insecure', default=False,
help=_("Allow to perform insecure SSL (https) requests to "
"nova metadata")),
cfg.StrOpt('nova_client_cert',
default='',
help=_("Client certificate for nova metadata api server.")),
cfg.StrOpt('nova_client_priv_key',
default='',
help=_("Private key of client certificate."))
]
UNIX_DOMAIN_METADATA_PROXY_OPTS = [
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location for Metadata Proxy UNIX domain socket')),
cfg.IntOpt('metadata_workers',
default=utils.cpu_count() // 2,
help=_('Number of separate worker processes for metadata '
'server')),
cfg.IntOpt('metadata_backlog',
default=4096,
help=_('Number of backlog requests to configure the '
'metadata server socket with'))
]
|
MJB47/Jokusoramame
|
migrations/versions/9a0f78ff57d6_add_tagalias_table.py
|
Python
|
mit
| 1,126
| 0.008881
|
"""Add TagAlias table
Revision ID: 9a0f78ff57d6
Revises: d19881f4c045
Create Date: 2017-03-19 15:34:30.271997
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9a0f78ff57d6'
down_revision = 'd19881f4c045'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag_alias',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('alias_name', sa.String(), nullable=False),
sa.Column('tag_id', sa.Integer(), nullable=True),
sa.Column('guild_id', sa.BigInteger(), nullable=True),
sa.Column('user_id', sa.BigInteger(), nullable=True),
sa.ForeignKeyConstraint(['guild_id'], ['guild.id'], ),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag_alias')
# ### end Alembic commands ###
|
Matoking/pastebin-django
|
users/views.py
|
Python
|
unlicense
| 14,332
| 0.012071
|
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.core.exceptions import ObjectDoesNotExist
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django_redis import get_redis_connection
from users.models import PastebinUser
from users.forms import RegisterForm, LoginForm, ChangePreferencesForm, ChangePasswordForm, VerifyPasswordForm
from users.models import Favorite, SiteSettings
from pastes.models import Paste
from pastebin.util import Paginator
import math
def register_view(request):
"""
Register a new user
"""
# Check if the user is authenticated
if request.user.is_authenticated():
# User is already authenticated
return render(request, 'users/register/already_logged_in.html')
else:
register_form = RegisterForm(request.POST or None)
if request.method == 'POST': # Form data was submitted
if register_form.is_valid(): # Form data is valid
# Create the user
with transaction.atomic():
user = User.objects.create_user(register_form.cleaned_data['username'],
"N/A", # we don't deal with email addresses
register_form.cleaned_data['password'])
PastebinUser.create_user(user)
# TODO: Show a different message if the registration fails
return render(request, 'users/register/register_success.html')
# Show the registration page
return render(request, "users/register/register.html", { "form": register_form })
def login_view(request):
"""
Log the user in
"""
# Check if the user is authenticated
if request.user.is_authenticated():
# User is authenticated
return render(request, "users/login/logged_in.html")
else:
login_form = LoginForm(request.POST or None)
# User is NOT authenticated
if request.method == 'POST': # Form data was submitted
if login_form.is_valid(): # Form data is valid
user = authenticate(username = login_form.cleaned_data['username'],
password = login_form.cleaned_data['password'])
if user is not None and user.is_active:
login(request, user)
return render(request, "users/login/logged_in.html")
else:
# Couldn't authenticate, either the username or password is wrong
error = "User doesn't exist or the password is incorrect."
login_form._errors['password'] = login_form.error_class([error])
# Show the login form
return render(request, "users/login/login.html", { "form": login_form })
def logout_view(request):
"""
Logout the user and show the logout page
"""
if request.user.is_authenticated():
logout(request)
return render(request, 'users/logout/logged_out.html')
def profile(request, username, tab="home", page=1):
"""
Show a publicly visible profile page
"""
page = int(page)
try:
profile_user = cache.get("user:%s" % username)
if profile_user == None:
profile_user = User.objects.get(username=username)
cache.set("user:%s" % username, profile_user)
elif profile_user == False:
return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
except ObjectDoesNotExist:
cache.set("user:%s" % username, False)
return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
# Get user's settings
profile_settings = cache.get("site_settings:%s" % username)
if profile_settings == None:
try:
profile_settings = SiteSettings.objects.get(user=profile_user)
except ObjectDoesNotExist:
profile_settings = SiteSettings(user=profile_user)
profile_settings.save()
cache.set("site_settings:%s" % username, profile_settings)
if not profile_user.is_active:
return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
if request.user != profile_user:
total_paste_count = cache.get("user_public_paste_count:%s" % profile_user.username)
else:
total_paste_count = cache.get("user_paste_count:%s" % profile_user.username)
# If user is viewing his own profile, also include hidden pastes
if total_paste_count == None and request.user != profile_user:
total_paste_count = Paste.objects.filter(user=profile_user, removed=Paste.NO_REMOVAL).filter(hidden=False).count()
cache.set("user_public_paste_count:%s" % profile_user.username, total_paste_count)
elif total_paste_count == None and request.user == profile_user:
total_paste_count = Paste.objects.filter(user=profile_user, removed=Paste.NO_REMOVAL).count()
cache.set("user_paste_count:%s" % profile_user.username, total_paste_count)
total_favorite_count = cache.get("user_favorite_count:%s" % profile_user.username)
if total_favorite_count == None:
total_favorite_count = Favorite.objects.filter(user=profile_user).count()
cache.set("user_favorite_count:%s" % profile_user.username, total_favorite_count)
args = {"profile_user": profile_user,
"profile_settings": profile_settings,
"current_page": page,
"tab": tab,
"total_favorite_count": total_favorite_count,
"total_paste_count": total_paste_count}
if tab == "home":
return home(request, args)
elif tab == "pastes":
return pastes(request, profile_user, args, page)
elif tab == "favorites":
return favorites(request, profile_user, args, page)
# The remaining pages require authentication, so redirect through settings()
else:
return settings(request, profile_user, args, tab)
def settings(request, username, args={}, tab="change_password"):
"""
Show a page which allows the user to change his settings
"""
if not request.user.is_authenticated():
return render(request, "users/settings/settings_error.html", {"reason": "not_logged_in"})
profile_user = User.objects.get(username=username)
if request.user.id != profile_user.id:
return render(request, "users/settings/settings_error.html", {"reason": "incorrect_user"})
if tab == "change_preferences":
return change_preferences(request, args)
if tab == "change_password":
return change_password(request, args)
elif tab == "delete_account":
return delete_account(request, args)
def home(request, args):
"""
Display user profile's home with the most recent pastes and favorites
"""
# Get favorites only if user has made them public
if args["profile_settings"].public_favorites or request.user == args["profile_user"]:
args["favorites"] = cache.get("profile_favorites:%s" % args["profile_user"].username)
if args["favorites"] == None:
args["favorites"] = Favorite.objects.filter(user=args["profile_user"]).order_by('-added').select_related('paste')[:10]
cache.set("profile_favorites:%s" % args["profile_user"].username, args["favorites"])
if request.user == args["profile_user"]:
args["pastes"] = cache.get("profile_pastes:%s" % args["profile_user"].username)
if args["pastes"] == None:
args["pastes"] = Paste.objects.get_pastes(args["profile_user"], include_hidden=True, count=10)
cache.set("profile_pastes:%s" % args["profile_user"
|
Sergey19940808/OurFoto
|
repository_our_fotos/urls.py
|
Python
|
mit
| 671
| 0.034277
|
# Imports
from django.conf.urls import url
from .models import OurFoto
from .views import HomeFoto, ShowFoto, DeleteFoto, AddFoto, \
EditFoto, SearchFoto
# Urls for app
urlpatterns = [
url(r'^$', HomeFoto.as_view(model = OurFoto), name = 'index'),
url(r'^foto/(?P<pk>\d+)/$', ShowFoto.as_view(model = OurFoto), name = 'foto'),
url(r'^add_foto/$', AddFoto.as_view(), name = 'add_foto'),
url(r'^edit_foto/(?P<pk>\d+)/$', EditFoto.as_view(model = OurFoto), name = 'edit_foto'),
url(r'^search_foto/$', SearchFoto.as_view(), name = 'search_foto'),
url(r'^delete_foto/(?P<pk>\d+)/$', DeleteFoto.as_view(model = OurFoto), name = 'delete_foto')
]
|
Rootbuzz/Django-Socialtags
|
socialtags/templatetags/social_tags.py
|
Python
|
mit
| 1,400
| 0.007143
|
from django.template import Library
from django.conf import settings
if "django.contrib.sites" in settings.INSTALLED_APPS:
from django.contrib.sites.models import Site
current_domain = lambda: Site.objects.get_current().domain
elif getattr(settings, "SITE_DOMAIN", None):
current_domain = lambda: settings.SITE_DOMAIN
else:
current_domain = lambda: "example.com"
register = Library()
def fully_qualified(url):
# if it's not a string the rest of this fn will bomb
if not isinstance(url, basestring): return ""
if url.startswith('http'):
return url
elif url.startswith("/"):
return 'http://%s%s' % (current_domain(), url)
else:
return 'http://%s' % url
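# For illustration only (not in the original file), assuming the current
# domain resolves to "example.com":
#
#     fully_qualified('/about/') -> 'http://example.com/about/'
#     fully_qualified('https://other.org/x') -> 'https://other.org/x'
#     fully_qualified(None) -> ''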
|
@register.inclusion_tag('social_tags/twitter.html')
def twitter_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/facebook.html')
def facebook_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/linkedin.html')
def linkedin_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/email.html')
def email_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/google.html')
def google_plus(url=None):
url = fully_qualified(url)
return locals()
|
dvalcarce/filmyou-web
|
src/apps/films/models.py
|
Python
|
apache-2.0
| 9,791
| 0.000511
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from userena.models import UserenaBaseProfile
from caching.base import CachingManager, CachingMixin
from apps.utils.db import retrieve_in_order_from_db
from apps.utils import poster
from libs.cassandra import CassandraConnection
class Person(CachingMixin, models.Model):
"""
Person model.
"""
person_id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=150)
objects = CachingManager()
def __unicode__(self):
return self.name
class Genre(CachingMixin, models.Model):
"""
Film genre model.
"""
genre_id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=50)
objects = CachingManager()
def __unicode__(self):
return self.name
class Country(CachingMixin, models.Model):
"""
Film country model.
"""
country_id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=50)
objects = CachingManager()
def __unicode__(self):
return self.name
class Language(CachingMixin, models.Model):
"""
Film language model.
"""
language_id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=50)
objects = CachingManager()
def __unicode__(self):
return self.name
class Film(CachingMixin, models.Model):
"""
Film model
"""
film_id = models.PositiveIntegerField(primary_key=True)
imdb_id = models.PositiveIntegerField(unique=True)
netflix_id = models.PositiveIntegerField(null=True, unique=True)
title = models.CharField(max_length=300)
year = models.PositiveSmallIntegerField(null=True)
runtime = models.PositiveSmallIntegerField(null=True)
rating = models.CharField(max_length=24, null=True)
released = models.DateField(null=True)
plot = models.TextField(null=True)
metascore = models.PositiveIntegerField(null=True)
imdb_rating = models.FloatField(null=True, default=0)
imdb_votes = models.PositiveIntegerField(null=True, default=0)
fullplot = models.TextField(null=True)
poster = models.URLField(null=True)
awards = models.PositiveIntegerField(null=True)
updated = models.DateField(null=True)
poster_file = models.ImageField(upload_to='posters', null=True)
n_votes = models.PositiveIntegerField(default=0)
sum_votes = models.FloatField(default=0)
directors = models.ManyToManyField(Person, related_name="director")
writers = models.ManyToManyField(Person, related_name="writer")
casts = models.ManyToManyField(Person, related_name="cast")
genres = models.ManyToManyField(Genre)
countries = models.ManyToManyField(Country)
languages = models.ManyToManyField(Language)
objects = CachingManager()
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('films:details', args=[self.film_id])
def get_poster(self):
if not self.poster_file:
poster.retrieve(self)
return self.poster_file
@property
def score(self):
"""
Calculate film score:
score = (imdb_votes * imdb_rating / 2 + sum_votes) / total_votes,
where total_votes = imdb_votes + n_votes.
:return:
"""
total_votes = self.imdb_votes + self.n_votes
if total_votes:
score = (self.imdb_votes * self.imdb_rating / 2.0 + self.sum_votes) / total_votes
else:
score = 0.0
return score
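# Worked numbers for the formula above (illustrative only): with
# imdb_votes=1000, imdb_rating=8.0, n_votes=10 and sum_votes=45,
# total_votes is 1010 and score = (1000 * 8.0 / 2 + 45) / 1010, roughly 4.005.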
@property
def similar_films(self):
from libs.lucene import FilmSearcher
with FilmSearcher() as searcher:
return searcher.more_like_this(self)
def set_preference(self, user):
"""
Set the preference rated by the given user to the film.
:param user: user
"""
query = "SELECT score FROM ratings " \
"WHERE user = %(user)s AND item = %(item)s"
parameters = {
'user': user.user.id,
'item': self.film_id
}
# Retrieve ratings from Cassandra
with CassandraConnection() as db:
try:
self.preference = db.execute(query, parameters)[0].score
except IndexError:
self.preference = None
def rate(self, user, score):
"""
Update film model with a new rating and remove recommendation if exists.
:param user: user
:param score: score
"""
score = float(score)
self.set_preference(user)
insert_query = "INSERT INTO ratings (user, item, score) " \
"VALUES ( %(user)s, %(item)s, %(score)s )"
select_query = "SELECT relevance FROM recommendations " \
"WHERE user = %(user)s AND item = %(item)s"
parameters = {
'user': user.user.id,
'item': self.film_id,
'score': float(score)
}
with CassandraConnection() as db:
db.execute(insert_query, parameters)
result = db.execute(select_query, parameters)
if result:
delete_query = "DELETE FROM recommendations " \
"WHERE user = %(user)s " \
"AND relevance = %(relevance)s " \
"AND item = %(item)s"
parameters['relevance'] = result[0].relevance
db.execute(delete_query, parameters)
if self.preference:
score -= self.preference
else:
self.n_votes += 1
self.sum_votes += score
self.save()
class MyUser(CachingMixin, UserenaBaseProfile):
user = models.OneToOneField(User, unique=True, related_name='profile')
objects = CachingManager()
def get_preferences_for_films(self, films):
"""
Get the ratings for the given films
:param films: list of Film objects
:return: list of films with preference attribute set
"""
# query = "SELECT item, score FROM ratings WHERE user = %(user)s AND item IN %(films)s"
query = "SELECT item, score FROM ratings WHERE user = %(user)s AND item IN (" \
+ ", ".join([str(film.film_id) for film in films]) + ")"
parameters = {'user': self.user.id}
# Retrieve ratings from Cassandra
with CassandraConnection() as db:
ratings = db.execute(query, parameters)
# Set rating field
ratings_dict = {item: score for (item, score) in ratings}
for film in films:
film.preference = ratings_dict.get(film.film_id, None)
return films
def get_rated_films(self, last=None, count=12):
"""
Gets a list of rated films by self.
:param last: id of the last film queried or None
:param count: number of elements to be retrieved
:return: list of films with preference attribute set
"""
parameters = {
'user': self.user.id,
'limit': count
}
if last:
query = "SELECT item, score " \
"FROM ratings " \
"WHERE user = %(user)s AND item > %(last)s " \
"LIMIT " \
"%(limit)s"
parameters['last'] = last
else:
query = "SELECT item, score " \
"FROM ratings " \
"WHERE user = %(user)s " \
"LIMIT %(limit)s"
# Retrieve ratings from Cassandra
with CassandraConnection() as db:
ratings = db.execute(query, parameters)
# Retrieve films info from the RDBMS
ids = [item for (item, score) in ratings]
films = retrieve_in_order_from_db(Film, ids)
# Set rating field
ratings_dict = {item: score for (item, score) in ratings}
for film in films:
film.preference = ratings_dict.get(film.film_id, None)
|
dantebarba/docker-media-server
|
plex/Sub-Zero.bundle/Contents/Libraries/Shared/dateutil/test/test_parser.py
|
Python
|
gpl-3.0
| 31,922
| 0.000377
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ._common import unittest
from datetime import datetime, timedelta, date
from dateutil.tz import tzoffset
from dateutil.parser import *
import six
from six import assertRaisesRegex, PY3
from six.moves import StringIO
class ParserTest(unittest.TestCase):
def setUp(self):
self.tzinfos = {"BRST": -10800}
self.brsttz = tzoffset("BRST", -10800)
self.default = datetime(2003, 9, 25)
# Parser should be able to handle bytestring and unicode
base_str = '2014-05-01 08:00:00'
try:
# Python 2.x
self.uni_str = unicode(base_str)
self.str_str = str(base_str)
except NameError:
self.uni_str = str(base_str)
self.str_str = bytes(base_str.encode())
def testEmptyString(self):
with self.assertRaises(ValueError):
parse('')
def testNone(self):
with self.assertRaises(TypeError):
parse(None)
def testInvalidType(self):
with self.assertRaises(TypeError):
parse(13)
def testDuckTyping(self):
# We want to support arbitrary classes that implement the stream
# interface.
class StringPassThrough(object):
def __init__(self, stream):
self.stream = stream
def read(self, *args, **kwargs):
return self.stream.read(*args, **kwargs)
dstr = StringPassThrough(StringIO('2014 January 19'))
self.assertEqual(parse(dstr), datetime(2014, 1, 19))
def testParseStream(self):
dstr = StringIO('2014 January 19')
self.assertEqual(parse(dstr), datetime(2014, 1, 19))
def testParseStr(self):
self.assertEqual(parse(self.str_str),
parse(self.uni_str))
def testParserParseStr(self):
from dateutil.parser import parser
self.assertEqual(parser().parse(self.str_str),
parser().parse(self.uni_str))
def testParseUnicodeWords(self):
class rus_parserinfo(parserinfo):
MONTHS = [("янв", "Январь"),
("фев", "Февраль"),
("мар", "Март"),
("апр", "Апрель"),
("май", "Май"),
("июн", "Июнь"),
("июл", "Июль"),
("авг", "Август"),
("сен", "Сентябрь"),
("окт", "Октябрь"),
("ноя", "Ноябрь"),
("дек", "Декабрь")]
self.assertEqual(parse('10 Сентябрь 2015 10:20',
parserinfo=rus_parserinfo()),
datetime(2015, 9, 10, 10, 20))
def testParseWithNulls(self):
# This relies on the from __future__ import unicode_literals, because
# explicitly specifying a unicode literal is a syntax error in Py 3.2
# May want to switch to u'...' if we ever drop Python 3.2 support.
pstring = '\x00\x00August 29, 1924'
self.assertEqual(parse(pstring),
datetime(1924, 8, 29))
def testDateCommandFormat(self):
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
tzinfos=self.tzinfos),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatUnicode(self):
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
tzinfos=self.tzinfos),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatReversed(self):
self.assertEqual(parse("2003 10:36:28 BRST 25 Sep Thu",
tzinfos=self.tzinfos),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatWithLong(self):
if not PY3:
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
tzinfos={"BRST": long(-10800)}),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatIgnoreTz(self):
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
ignoretz=True),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip1(self):
self.assertEqual(parse("Thu Sep 25 10:36:28 2003"),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip2(self):
self.assertEqual(parse("Thu Sep 25 10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip3(self):
self.assertEqual(parse("Thu Sep 10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip4(self):
self.assertEqual(parse("Thu 10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip5(self):
self.assertEqual(parse("Sep 10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip6(self):
self.assertEqual(parse("10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip7(self):
self.assertEqual(parse("10:36", default=self.default),
datetime(2003, 9, 25, 10, 36))
def testDateCommandFormatStrip8(self):
self.assertEqual(parse("Thu Sep 25 2003"),
datetime(2003, 9, 25))
def testDateCommandFormatStrip9(self):
self.assertEqual(parse("Sep 25 2003"),
datetime(2003, 9, 25))
def testDateCommandFormatStrip10(self):
self.assertEqual(parse("Sep 2003", default=self.default),
datetime(2003, 9, 25))
def testDateCommandFormatStrip11(self):
self.assertEqual(parse("Sep", default=self.default),
datetime(2003, 9, 25))
def testDateCommandFormatStrip12(self):
self.assertEqual(parse("2003", default=self.default),
datetime(2003, 9, 25))
def testDateRCommandFormat(self):
self.assertEqual(parse("Thu, 25 Sep 2003 10:49:41 -0300"),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz))
def testISOFormat(self):
self.assertEqual(parse("2003-09-25T10:49:41.5-03:00"),
datetime(2003, 9, 25, 10, 49, 41, 500000,
tzinfo=self.brsttz))
def testISOFormatStrip1(self):
self.assertEqual(parse("2003-09-25T10:49:41-03:00"),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz))
def testISOFormatStrip2(self):
self.assertEqual(parse("2003-09-25T10:49:41"),
datetime(2003, 9, 25, 10, 49, 41))
def testISOFormatStrip3(self):
self.assertEqual(parse("2003-09-25T10:49"),
datetime(2003, 9, 25, 10, 49))
def testISOFormatStrip4(self):
self.assertEqual(parse("2003-09-25T10"),
datetime(2003, 9, 25, 10))
def testISOFormatStrip5(self):
self.assertEqual(parse("2003-09-25"),
datetime(2003, 9, 25))
def testISOStrippedFormat(self):
self.assertEqual(parse("20030925T104941.5-0300"),
datetime(2003, 9, 25, 10, 49, 41, 500000,
tzinfo=self.brsttz))
def testISOStrippedFormatStrip1(self):
self.assertEqual(parse("20030925T104941-0300"),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz))
def testISOStrippedFormatStrip2(se
|
anthonyabeo/MeetNEat
|
api/oauth.py
|
Python
|
unlicense
| 3,846
| 0.00026
|
import json
from flask import session
from flask import url_for, redirect, request
from rauth import OAuth1Service, OAuth2Service
from meetneat.config import OAUTH_CREDENTIALS
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = OAUTH_CREDENTIALS[provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('api.oauth_callback', provider=self.provider_name, _external=True)
@classmethod
def get_provider(cls, provider_name):
if cls.providers is None:
cls.providers = {}
for provider_class in cls.__subclasses__():
provider = provider_class()
cls.providers[provider.provider_name] = provider
return cls.providers[provider_name]
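# Illustrative call site, not part of the original file: a login view would
# typically resolve a provider by name and start the redirect flow.
#
#     provider = OAuthSignIn.get_provider('facebook')
#     return provider.authorize()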
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name='facebook',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url='https://graph.facebook.com/oauth/authorize',
access_token_url='https://graph.facebook.com/oauth/access_token',
base_url='https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri=self.get_callback_url())
)
def callback(self):
def decode_json(payload):
return json.loads(payload.decode('utf-8'))
if 'code' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()},
decoder=decode_json
)
me = oauth_session.get('me').json()
return (
'facebook$' + me['id'],
me.get('email').split('@')[0], # Facebook does not provide
# username, so the email's user
# is used instead
me.get('email')
)
class TwitterSignIn(OAuthSignIn):
def __init__(self):
super(TwitterSignIn, self).__init__('twitter')
self.service = OAuth1Service(
name='twitter',
consumer_key=self.consumer_id,
consumer_secret=self.consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
authorize_url='https://api.twitter.com/oauth/authorize',
access_token_url='https://api.twitter.com/oauth/access_token',
base_url='https://api.twitter.com/1.1/'
)
def authorize(self):
request_token = self.service.get_request_token(
params={'oauth_callback': self.get_callback_url()}
)
session['request_token'] = request_token
return redirect(self.service.get_authorize_url(request_token[0]))
def callback(self):
request_token = session.pop('request_token')
if 'oauth_verifier' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
request_token[0],
request_token[1],
data={'oauth_verifier': request.args['oauth_verifier']}
)
me = oauth_session.get('account/verify_credentials.json').json()
social_id = 'twitter$' + str(me.get('id'))
username = me.get('screen_name')
return social_id, username, None  # Twitter does not provide email
|
Caveat4U/home_automation
|
trigger.py
|
Python
|
gpl-2.0
| 793
| 0.011349
|
from abc import ABCMeta, abstractmethod
from my_hue import *
# Would dynamically choose a trigger based on trigger type
def trigger_factory(trigger_type):
return None
class Trigger(object):
__metaclass__ = ABCMeta
def __init__(self):
self.action()
@abstractmethod
def action(self):
pass
class IClickerTrigger(object):
def __init__(self, clicker_id, response_info, time_of_trigger, sequence_number):
super(IClickerTrigger, self).__init__()
self.clicker_id = clicker_id
self.response_info = response_info
self.time_of_trigger = time_of_trigger
self.sequence_number = sequence_number
def action(self):
print self.response_info
button = 'a'
if button == 'a':
pass
|
LCAS/teaching
|
turtlebot_simulator/turtlebot_stdr/nodes/tf_connector.py
|
Python
|
mit
| 741
| 0.002699
|
#!/usr/bin/env python
__author__ = 'mehdi tlili'
import rospy
from tf2_msgs.msg import TFMessage
import tf
class Remapper(object):
def __init__(self):
self.br = tf.TransformBroadcaster()
rospy.Subscriber("/tf", TFMessage, self.tf_remapper)
def tf_remapper(self, msg):
if msg.transforms[0].header.frame_id == "/robot0":
self.br.sendTransform((0, 0, 0),
tf.transformations.quaternion_from_euler(0, 0, 0),
rospy.Time.now(),
"base_footprint",
"robot0")
if __name__ == '__main__':
rospy.init_node('remapper_nav')
remapper = Remapper()
rospy.spin()
|
Stratoscale/inaugurator
|
inaugurator/tests/test_idlistener.py
|
Python
|
apache-2.0
| 5,274
| 0.000948
|
import os
import sys
import mock
import logging
import unittest
from inaugurator.server import idlistener
from inaugurator.tests.common import PikaChannelMock
class Test(unittest.TestCase):
def setUp(self):
self.consumeCallback = mock.Mock()
self.channel = PikaChannelMock(self)
self.expectedStatusExchange = idlistener.statusExchange("delta-foxtrot")
self.tested = idlistener.IDListener("delta-foxtrot", self.consumeCallback, self.channel)
def test_Listen(self):
self.validateListenHappyFlow()
def test_StopListening(self):
queue = self.validateListenHappyFlow()
self.tested.stopListening()
self.validateOneStatusQueueIsAllocated(queue, allowOtherRequests=True)
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated()
self.validateMessages(self.basicConsumeCallback, isArrivalExpected=False)
def test_StopListeningBeforeExchangeDeclared(self):
self.validateNoStatusQueueIsAllocated()
self.tested.stopListening()
self.validateNoStatusQueueIsAllocated()
self.channel.answerExchangeDeclare(self.expectedStatusExchange)
self.validateNoStatusQueueIsAllocated()
def test_StopListeningBeforeQueueDeclared(self):
self.validateListenFlowUntilStatusQueueDeclare()
self.validateOneStatusQueueIsAllocating()
self.tested.stopListening()
self.validateOneStatusQueueIsAllocating()
queue = self.channel.answerQueueDeclare()
self.validateOneStatusQueueIsAllocated(queue, allowOtherRequests=True)
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated()
def test_StopListeningBeforeQueueBinded(self):
self.validateListenFlowUntilStatusQueueDeclare()
queue = self.channel.answerQueueDeclare()
self.validateOneStatusQueueIsAllocated(queue)
self.tested.stopListening()
self.validateOneStatusQueueIsAllocated(queue, allowOtherRequests=True)
queueBindCallback = self.channel.getQueueBindCallback()
queueBindCallback(queue)
self.validateOneStatusQueueIsAllocated(queue, allowOtherRequests=True)
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated(allowOtherRequests=True)
def test_StopListeningTwice(self):
queue = self.validateListenHappyFlow()
self.tested.stopListening()
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated()
self.tested.stopListening()
self.validateNoStatusQueueIsAllocated()
def test_MoreThanOneInstance(self):
for i in xrange(10):
queue = self.validateListenHappyFlow()
self.tested.stopListening()
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated()
self.tested = idlistener.IDListener("delta-foxtrot", self.consumeCallback, self.channel)
self.validateNoStatusQueueIsAllocated()
def validateListenFlowUntilStatusQueueDeclare(self):
self.validateNoStatusQueueIsAllocated()
self.channel.answerExchangeDeclare(self.expectedStatusExchange)
self.validateOneStatusQueueIsAllocating()
def validateListenFlowAfterQueueDeclare(self, queue):
queueBindCallback = self.channel.getQueueBindCallback()
queueBindCallback(queue)
self.basicConsumeCallback = self.channel.getBasicConsumeCallback()
self.validateMessages(self.basicConsumeCallback)
self.validateOneStatusQueueIsAllocated(queue)
def validateListenHappyFlow(self):
self.validateListenFlowUntilStatusQueueDeclare()
queue = self.channel.answerQueueDeclare()
self.validateListenFlowAfterQueueDeclare(queue)
self.validateOneStatusQueueIsAllocated(queue)
return queue
def validateMessages(self, basicConsumeCallback, isArrivalExpected=True):
message = 'I am a cool message.'
basicConsumeCallback(message)
self.assertEquals(self.consumeCallback.called, isArrivalExpected)
self.consumeCallback.reset_mock()
def validateOneStatusQueueIsAllocated(self, queue, allowOtherRequests=False):
self.assertEquals(set([queue]), self.channel.declaredQueues)
if not allowOtherRequests:
self.assertFalse(self.channel.requests)
def validateOneStatusQueueIsAllocating(self, allowDeleteRequests=False):
self.assertEquals(len(self.channel.requests), 1)
self.assertEquals(self.channel.requests[0][0], "declare")
if not allowDeleteRequests:
self.assertFalse(self.channel.declaredQueues)
def validateNoStatusQueueIsAllocated(self, allowOtherRequests=False):
        self.assertFalse(self.channel.declaredQueues)
if not allowOtherRequests:
self.assertFalse(self.channel.requests)
self.assertFalse(self.channel.queue_bind.called)
self.assertFalse(self.channel.basic_consume.called)
if __name__ == '__main__':
_logger = logging.getLogger("inaugurator.server")
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
_logger.addHandler(handler)
_logger.setLevel(logging.DEBUG)
unittest.main()
|
box-community/box-weekly-stats
|
add_shared_to_group.py
|
Python
|
apache-2.0
| 3,242
| 0.005861
|
""" Copyright 2015 Kris Steinhoff, The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import box
import ConfigParser, csv, optparse, os, re, sys, json
from pprint import pprint
def update_counter(message):
sys.stdout.write("\r"+ message)
sys.stdout.flush()
# sys.stdout.write("\n")
def human_file_size(size): # http://stackoverflow.com/a/1094933/70554
format = "%3.1f %s"
tiers = ["bytes","KB","MB","GB"]
for t in tiers[:-1]:
if size < 1024.0:
return format % (size, t)
size /= 1024.0
return format % (size, tiers[-1])
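# Illustrative values (not from the original source): human_file_size(1536) returns
# "1.5 KB" and human_file_size(500) returns "500.0 bytes", since the loop divides by
# 1024.0 until the value drops below one unit of the next tier.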
def median(values):
values.sort()
count = len(values)
    if count % 2 == 1:
return values[count/2]
else:
return ( values[(count/2)-1] + values[count/2] ) / 2.0
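# Worked examples (illustrative): median([3, 1, 2]) -> 2 and median([1, 2, 3, 4]) -> 2.5.
# Note that the function sorts its argument list in place.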
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-d", "--dry-run", action="store_true", dest="dry_run", default=False, help="simulate changes")
(options, args) = parser.parse_args()
box = box.BoxApi()
config = ConfigParser.ConfigParser()
settings_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "settings.conf")
config.read(settings_file)
try:
group_id = config.get("add_shared_to_group", "group_id")
except:
print "group_id not configured (in add_shared_to_group section)"
sys.exit(1)
if len(args) > 0:
infile = csv.reader(open(args[0], "rb"))
else:
infile = csv.reader(sys.stdin)
headers = infile.next()
role_rules = {
"student": re.compile(r"(Enrolled)?Student(AA|DBRN|FLNT)"),
"staff": re.compile(r"(Regular|Temporary)Staff(AA|DBRN|FLNT)"),
"faculty": re.compile(r"Faculty(AA|DBRN|FLNT)"),
"sponsored": re.compile(r"SponsoredAffiliate(AA|DBNR|FLNT)")
}
types = ("user", "shared")
storage = ([], [])
affiliations = {}
roles = dict.fromkeys(role_rules.keys(), 0)
ids = []
for attr_values in infile:
attrs = dict(zip(headers, attr_values))
id = attrs["box_id"]
if attrs["box_account_type"].lower() == "shared":
ids.append(id)
for id in ids:
data = json.dumps({ "user": {"id": id}, "group": {"id": group_id, "role": "member"}})
if options.dry_run:
print data
else:
r = box.request("POST", "/group_memberships", data=data)
if r.status_code == 201:
print "User ID %s added to group." % id
elif r.status_code == 409:
print "User ID %s NOT added to group already exists." % id
else:
print "WARNING: Received an unexpected response:"
print r.text
|
winnerineast/Origae-6
|
origae/model/__init__.py
|
Python
|
gpl-3.0
| 352
| 0
|
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .images import (
ImageClassificationModelJob,
GenericImageModelJob,
ImageModelJob,
)
from .job import ModelJob
__all__ = [
'ImageClassificationModelJob',
'GenericImageModelJob',
'ImageModelJob',
'ModelJob',
]
|
gallupliu/QA
|
preprocess/data_helper.py
|
Python
|
apache-2.0
| 4,978
| 0.004219
|
#coding=utf-8
import codecs
import logging
import numpy as np
import os
from collections import defaultdict
# define a logger
logging.basicConfig(format="%(message)s", level=logging.INFO)
def load_embedding(filename, embedding_size):
"""
load embedding
"""
embeddings = []
word2idx = defaultdict(list)
idx2word = defaultdict(list)
idx = 0
with codecs.open(filename, mode="r", encoding="utf-8") as rf:
try:
for line in rf.readlines():
idx += 1
arr = line.split(" ")
if len(arr) != (embedding_size + 2):
logging.error("embedding error, index is:%s"%(idx))
continue
embedding = [float(val) for val in arr[1 : -1]]
word2idx[arr[0]] = len(word2idx)
idx2word[len(word2idx)] = arr[0]
embeddings.append(embedding)
except Exception as e:
logging.error("load embedding Exception," , e)
finally:
rf.close()
logging.info("load embedding finish!")
return embeddings, word2idx, idx2word
def sent_to_idx(sent, word2idx, sequence_len):
"""
convert sentence to index array
"""
unknown_id = word2idx.get("UNKNOWN", 0)
sent2idx = [word2idx.get(word, unknown_id) for word in sent.split("_")[:sequence_len]]
return sent2idx
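# A minimal illustration (the vocabulary is hypothetical, not from the original data):
# with word2idx = {"UNKNOWN": 0, "hello": 1, "world": 2},
# sent_to_idx("hello_world_foo", word2idx, 10) returns [1, 2, 0], because "foo" falls
# back to the UNKNOWN index.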
def load_train_data(filename, word2idx, sequence_len):
"""
load train data
"""
ori_quests, cand_quests = [], []
with codecs.open(filename, mode="r", encoding="utf-8") as rf:
try:
for line in rf.readlines():
arr = line.strip().split(" ")
if len(arr) != 4 or arr[0] != "1":
logging.error("invalid data:%s"%(line))
continue
ori_quest = sent_to_idx(arr[2], word2idx, sequence_len)
cand_quest = sent_to_idx(arr[3], word2idx, sequence_len)
ori_quests.append(ori_quest)
cand_quests.append(cand_quest)
except Exception as e:
logging.error("load train data Exception," + e)
finally:
rf.close()
logging.info("load train data finish!")
return ori_quests, cand_quests
def create_valid(data, proportion=0.1):
if data is None:
logging.error("data is none")
os._exit(1)
data_len = len(data)
shuffle_idx = np.random.permutation(np.arange(data_len))
data = np.array(data)[shuffle_idx]
seperate_idx = int(data_len * (1 - proportion))
return data[:seperate_idx], data[seperate_idx:]
def load_test_data(filename, word2idx, sequence_len):
"""
load test data
"""
ori_quests, cand_quests, labels, qids = [], [], [], []
with codecs.open(filename, mode="r", encoding="utf-8") as rf:
try:
for line in rf.readlines():
arr = line.strip().split(" ")
if len(arr) != 4:
logging.error("invalid data:%s"%(line))
continue
ori_quest = sent_to_idx(arr[2], word2idx, sequence_len)
cand_quest = sent_to_idx(arr[3], word2idx, sequence_len)
label = int(arr[0])
result = int(arr[1].split(":")[1])
ori_quests.append(ori_quest)
cand_quests.append(cand_quest)
labels.append(label)
qids.append(result)
except Exception as e:
logging.error("load test error," , e)
|
finally:
rf.close()
logging.info("load test data finish!")
return ori_quests, cand_quests, labels, qids
def batch_iter(ori_quests, cand_quests, batch_size, epoches, is_valid=False):
"""
iterate the data
"""
data_len = len(ori_quests)
batch_num = int(data_len / batch_size)
ori_quests = np.array(ori_quests)
cand_quests = np.array(cand_quests)
for epoch in range(epoches):
if is_valid is not True:
shuffle_idx = np.random.permutation(np.arange(batch_num * batch_size))
ori_quests = np.array(ori_quests)[shuffle_idx]
cand_quests = np.array(cand_quests)[shuffle_idx]
for batch in range(batch_num):
start_idx = batch * batch_size
end_idx = min((batch + 1) * batch_size, data_len)
act_batch_size = end_idx - start_idx
# get negative questions
if is_valid:
neg_quests = cand_quests[start_idx : end_idx]
else:
randi_list = []
while len(randi_list) != act_batch_size:
[randi_list.append(idx) for idx in np.random.randint(0, data_len, 5 * act_batch_size) if start_idx < idx < end_idx and len(randi_list) < act_batch_size]
neg_quests = [cand_quests[idx] for idx in randi_list]
yield (ori_quests[start_idx : end_idx], cand_quests[start_idx : end_idx], neg_quests)
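# A minimal usage sketch (the batch size, epoch count and the train_step callable are
# illustrative, not part of the original module):
#   for ori_batch, cand_batch, neg_batch in batch_iter(ori_quests, cand_quests,
#                                                      batch_size=64, epoches=1):
#       train_step(ori_batch, cand_batch, neg_batch)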
|
disqus/psycopg2-managed-connection
|
setup.py
|
Python
|
apache-2.0
| 971
| 0.00309
|
import os
from setuptools import (
find_packages,
setup,
)
from setuptools.command.test import test
PACKAGE_DIR = 'src'
class PyTest(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest, sys
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
    name='psycopg2-managed-connection',
description='Thread-safe connection manager for psycopg2 connections.',
version='1.0.0',
author='Ted Kaemming, Disqus',
author_email='ted@disqus.com',
license='Apache License 2.0',
setup_requires=(
'setuptools>=8.0',
),
install_requires=(
        'psycopg2~=2.6',
),
packages=find_packages(PACKAGE_DIR),
package_dir={
'': PACKAGE_DIR,
},
zip_safe=False,
cmdclass = {
'test': PyTest,
},
tests_require=(
'pytest~=2.7',
),
)
|
svviz/svviz
|
src/ssw/ssw_wrap.py
|
Python
|
mit
| 15,367
| 0.00898
|
"""
@package ssw_wrap
@brief Simple python wrapper for SSW align library
To use the dynamic library libssw.so you may need to modify the LD_LIBRARY_PATH environment
variable to include the library directory (export LD_LIBRARY_PATH=$PWD) or for definitive
inclusion of the lib edit /etc/ld.so.conf and add the path or the directory containing the
library and update the cache by using /sbin/ldconfig as root
@copyright [The MIT licence](http://opensource.org/licenses/MIT)
@author Clement & Adrien Leger - 2014
"""
#~~~~~~~GLOBAL IMPORTS~~~~~~~#
# Standard library packages
from ctypes import *
import os
def _get_libssw_path():
base = os.path.dirname(__file__)
matches = [x for x in os.listdir(base) if (x.startswith("libssw") & x.endswith(".so"))]
if len(matches) < 1:
raise Exception("Couldn't find libssw.so in this directory: '{}'".format(base))
return os.path.join(base, matches[0])
libssw = cdll.LoadLibrary(_get_libssw_path())  # os.path.join(os.path.dirname(__file__), 'libssw.so'))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class CAlignRes(Structure):
"""
@class SSWAlignRes
@brief ctypes Structure with s_align struct mapping returned by SSWAligner.Align func
Correspond to the structure of the query profile
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~Ctype Structure~~~~~~~#
_fields_ = [('score', c_uint16),
('score2', c_uint16),
('ref_begin', c_int32),
('ref_end', c_int32),
('query_begin', c_int32),
('query_end', c_int32),
('ref_end2', c_int32),
('cigar', POINTER(c_uint32)),
('cigarLen', c_int32)]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class Aligner(object):
"""
@class SSWAligner
@brief Wrapper for SSW align library
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~CLASS VARIABLES~~~~~~~#
    # Dictionary to map nucleotide to int as expected by the SSW C library
base_to_int = { 'A':0, 'C':1, 'G':2, 'T':3, 'N':4, 'a':0, 'c':1, 'g':2, 't':3, 'n':4}
int_to_base = { 0:'A', 1:'C', 2:'G', 3:'T', 4:'N'}
# Load the ssw library using ctypes
# libssw = cdll.LoadLibrary('libssw.so')
#libssw = cdll.LoadLibrary(_get_libssw_path())#os.path.join(os.path.dirname(__file__), 'libssw.so'))
# Init and setup the functions pointer to map the one specified in the SSW lib
# ssw_init method
ssw_init = libssw.ssw_init
ssw_init.restype = c_void_p
ssw_init.argtypes = [POINTER(c_int8), c_int32, POINTER(c_int8), c_int32, c_int8]
# init_destroy function
init_destroy = libssw.init_destroy
init_destroy.restype = None
init_destroy.argtypes = [c_void_p]
# ssw_align function
ssw_align = libssw.ssw_align
ssw_align.restype = POINTER(CAlignRes)
ssw_align.argtypes = [c_void_p, POINTER(c_int8), c_int32, c_uint8, c_uint8, c_uint8, c_uint16, c_int32, c_int32]
# align_destroy function
align_destroy = libssw.align_destroy
align_destroy.restype = None
align_destroy.argtypes = [POINTER(CAlignRes)]
#~~~~~~~FONDAMENTAL METHODS~~~~~~~#
def __repr__(self):
msg = self.__str__()
msg += "SCORE PARAMETERS:\n"
msg += " Gap Weight Open: {} Extension: {}\n".format(-self.gap_open, -self.gap_extend)
msg += " Align Weight Match: {} Mismatch: {}\n\n".format(self.match, -self.mismatch)
msg += " Match/mismatch Score matrix\n"
msg += " \tA\tC\tG\tT\tN\n"
msg += " A\t{}\t{}\t{}\t{}\t{}\n".format(self.match, -self.mismatch, -self.mismatch, -self.mismatch, 0)
msg += " C\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, self.match, -self.mismatch, -self.mismatch, 0)
msg += " G\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, -self.mismatch, self.match, -self.mismatch, 0)
msg += " T\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, -self.mismatch, -self.mismatch, self.match, 0)
msg += " N\t{}\t{}\t{}\t{}\t{}\n\n".format(0,0,0,0,0)
msg += "RESULT PARAMETERS:\n"
msg += " Report cigar {}\n".format(self.report_cigar)
msg += " Report secondary match {}\n\n".format(self.report_secondary)
msg += "REFERENCE SEQUENCE :\n"
if self.ref_len <= 50:
msg += "".join([self.int_to_base[i] for i in self.ref_seq])+"\n"
else:
msg += "".join([self.int_to_base[self.ref_seq[i]] for i in range(50)])+"...\n"
msg += " Lenght :{} nucleotides\n".format(self.ref_len)
return msg
def __str__(self):
return "\n<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__)
def __init__(self,
ref_seq="",
match=2,
mismatch=2,
gap_open=3,
gap_extend=1,
report_secondary=False,
report_cigar=False):
"""
        Initialize object by creating an interface with ssw library functions
A reference sequence is also assigned to the object for multiple alignment against queries
with the align function
@param ref_seq Reference sequence as a python string (case insensitive)
@param match Weight for a match
@param mismatch Absolute value of mismatch penalty
@param gap_open Absolute value of gap open penalty
@param gap_extend Absolute value of gap extend penalty
        @param report_secondary  Report the 2nd best alignment if true
@param report_cigar Report cigar string if true
"""
# Store overall alignment parameters
self.report_secondary = report_secondary
self.report_cigar = report_cigar
# Set gap penalties
self.set_gap(gap_open, gap_extend)
# Set the cost matrix
self.set_mat(match, mismatch)
# Set the reference sequence
self.set_ref(ref_seq)
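    # A minimal usage sketch (sequences and scores below are illustrative only, assuming
    # libssw.so is discoverable as described in the module docstring):
    #   aligner = Aligner(ref_seq="ACGTACGTACGT", match=2, mismatch=2,
    #                     gap_open=3, gap_extend=1, report_cigar=True)
    #   result = aligner.align("CGTACG", min_score=0, min_len=0)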
#~~~~~~~SETTERS METHODS~~~~~~~#
def set_gap(self, gap_open=3, gap_extend=1):
"""
Store gapopen and gap extension penalties
"""
self.gap_open = gap_open
self.gap_extend = gap_extend
def set_mat(self, match=2, mismatch=2):
"""
Store match and mismatch scores then initialize a Cost matrix and fill it with match and
mismatch values. Ambiguous base: no penalty
"""
self.match = match
self.mismatch = mismatch
mat_decl = c_int8 * 25
self.mat = mat_decl(match, -mismatch, -mismatch, -mismatch, 0,
-mismatch, match, -mismatch, -mismatch, 0,
-mismatch, -mismatch, match, -mismatch, 0,
-mismatch, -mismatch, -mismatch, match, 0,
0, 0, 0, 0, 0)
def set_ref(self, ref_seq):
"""
Determine the size of the ref sequence and cast it in a c type integer matrix
"""
if ref_seq:
self.ref_len = len(ref_seq)
self.ref_seq = self._DNA_to_int_mat (ref_seq, self.ref_len)
else:
self.ref_len = 0
self.ref_seq = ""
#~~~~~~~PUBLIC METHODS~~~~~~~#
def align(self, query_seq, min_score=0, min_len=0):
"""
Perform the alignment of query against the object reference sequence
@param query_seq Query sequence as a python string (case insensitive)
        @param min_score   Minimal score of match. None will be returned if the match is filtered out
        @param min_len     Minimal length of match. None will be returned if the match is filtered out
        @return A SSWAlignRes object containing information about the alignment.
"""
# Determine the size of the ref sequence and cast it in a c type integer matrix
query_len = len(query_seq)
query_seq = self._DNA_to_int_mat (query_seq, query_len)
# Create the query profile using the query sequence
|
TrimBiggs/calico
|
calico/etcddriver/driver.py
|
Python
|
apache-2.0
| 43,284
| 0.000046
|
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
calico.etcddriver.driver
~~~~~~~~~~~~~~~~~~~~~~~~
Contains the logic for the etcd driver process, which monitors etcd for
changes and sends them to Felix over a unix socket.
The driver is responsible for
* loading the configuration from etcd at start-of-day (Felix needs this before
it can receive further updates)
* handling the initial load of data from etcd
* watching etcd for changes
* doing the above in parallel and merging the result into a consistent
sequence of events
* resolving directory deletions so that if a directory is deleted, it tells
Felix about all the individual keys that are deleted.
"""
import logging
import random
import socket
from Queue import Queue, Empty
from functools import partial
from ijson import JSONError
from calico.stats import AggregateStat, RateStat
try:
# simplejson is a faster drop-in replacement.
import simplejson as json
except ImportError:
import json
from threading import Thread, Event, Lock
import time
from urlparse import urlparse
from ijson.backends import yajl2 as ijson
from urllib3 import HTTPConnectionPool, HTTPSConnectionPool
import urllib3.exceptions
import httplib
from calico.etcddriver.protocol import (
MessageReader, MSG_TYPE_INIT, MSG_TYPE_CONFIG, MSG_TYPE_RESYNC,
MSG_KEY_ETCD_URLS, MSG_KEY_HOSTNAME, MSG_KEY_LOG_FILE, MSG_KEY_SEV_FILE,
MSG_KEY_SEV_SYSLOG, MSG_KEY_SEV_SCREEN, STATUS_WAIT_FOR_READY,
STATUS_RESYNC, STATUS_IN_SYNC, MSG_TYPE_CONFIG_LOADED,
MSG_KEY_GLOBAL_CONFIG, MSG_KEY_HOST_CONFIG, MSG_TYPE_UPDATE, MSG_KEY_KEY,
MSG_KEY_VALUE, MessageWriter, MSG_TYPE_STATUS, MSG_KEY_STATUS,
MSG_KEY_KEY_FILE, MSG_KEY_CERT_FILE, MSG_KEY_CA_FILE, WriteFailed,
SocketClosed)
from calico.etcdutils import ACTION_MAPPING
from calico.common import complete_logging
from calico.monotonic import monotonic_time
from calico.datamodel_v1 import (
READY_KEY, CONFIG_DIR, dir_for_per_host_config, VERSION_DIR,
ROOT_DIR)
from calico.etcddriver.hwm import HighWaterTracker
_log = logging.getLogger(__name__)
# Bound on the size of the queue between watcher and resync thread. In
# general, Felix and the resync thread process much more quickly than the
# watcher can read from etcd so this is defensive.
WATCHER_QUEUE_SIZE = 20000
# Threshold in seconds for detecting watcher tight looping on exception.
REQ_TIGHT_LOOP_THRESH = 0.2
# How often to log stats.
STATS_LOG_INTERVAL = 30
class EtcdDriver(object):
def __init__(self, felix_sck):
# Wrap the socket with our protocol reader/writer objects.
self._msg_reader = MessageReader(felix_sck)
self._msg_writer = MessageWriter(felix_sck)
# Global stop event used to signal to all threads to stop.
self._stop_event = Event()
# Threads to own the connection from/to Felix. The resync thread
# is responsible for doing resyncs and merging updates from the
# watcher thread (which it manages).
self._reader_thread = Thread(target=self._read_from_socket,
name="reader-thread")
self._reader_thread.daemon = True
self._resync_thread = Thread(target=self._resync_and_merge,
name="resync-thread")
self._resync_thread.daemon = True
self._watcher_thread = None # Created on demand
self._watcher_stop_event = None
self._watcher_start_index = None
# High-water mark cache. Owned by resync thread.
self._hwms = HighWaterTracker()
self._first_resync = True
self._resync_http_pool = None
self._cluster_id = None
# Resync thread stats.
self._snap_keys_processed = RateStat("snapshot keys processed")
self._event_keys_processed = RateStat("event keys processed")
self._felix_updates_sent = RateStat("felix updates sent")
self._resync_stats = [
self._snap_keys_processed,
self._event_keys_processed,
self._felix_updates_sent,
]
self._last_resync_stat_log_time = monotonic_time()
# Set by the reader thread once the init message has been received
# from Felix.
self._init_received = Event()
# Initial config, received in the init message.
self._etcd_base_url = None
self._etcd_other_urls = []
# Lock for the etcd url fields: this is the only lock, and no thread
# ever recursively acquires it, so it cannot deadlock. Must be locked
# to access the _etcd_base_url and _etcd_other_urls fields (after they
# are initialized).
self._etcd_url_lock = Lock()
self._hostname = None
# Set by the reader thread once the logging config has been received
# from Felix. Triggers the first resync.
self._config_received = Event()
# Flag to request a resync. Set by the reader thread, polled by the
# resync and merge thread.
self._resync_requested = False
def start(self):
"""Starts the driver's reader and resync threads."""
self._reader_thread.start()
self._resync_thread.start()
def join(self, timeout=None):
"""
Blocks until the driver stops or until timeout expires.
:returns True if the driver stopped, False on timeout.
"""
self._stop_event.wait(timeout=timeout)
stopped = self._stop_event.is_set()
if stopped:
self._resync_thread.join(timeout=timeout)
resync_alive = self._resync_thread.is_alive()
stopped &= not resync_alive
_log.debug("Resync thread alive: %s", resync_alive)
self._reader_thread.join(timeout=timeout)
reader_alive = self._reader_thread.is_alive()
stopped &= not reader_alive
_log.debug("Reader thread alive: %s", reader_alive)
try:
self._watcher_thread.join(timeout=timeout)
watcher_alive = self._watcher_thread.is_alive()
stopped &= not watcher_alive
_log.debug("Watcher thread alive: %s", watcher_alive)
except AttributeError:
pass
return stopped
def stop(self):
_log.info("Stopping driver")
self._stop_event.set()
def _read_from_socket(self):
"""
        Thread: reader thread.  Reads messages from Felix and fans them out.
"""
try:
while not self._stop_event.is_set():
for msg_type, msg in self._msg_reader.new_messages(timeout=1):
if msg_type == MSG_TYPE_INIT:
# Init message, received at start of day.
self._handle_init(msg)
elif msg_type == MSG_TYPE_CONFIG:
# Config message, expected after we send the raw
# config to Felix.
self._handle_config(msg)
elif msg_type == MSG_TYPE_RESYNC:
# Request to do a resync.
self._handle_resync(msg)
else:
_log.error("Unexpected message from Felix: %s", msg)
raise RuntimeError("Unexpected message from Felix")
except SocketClosed:
_log.warning("Felix closed its socket. The driver must exit.")
except DriverShutdown:
_log.warning("Reader thread stopping due to driver shutdown.")
finally:
_log.info("Reader thread shutting down, tr
|
imito/odin
|
odin/utils/progbar.py
|
Python
|
mit
| 17,930
| 0.010095
|
# -*- coding: utf-8 -*-
##################################################################
# Example of usage:
##################################################################
from __future__ import print_function, division, absolute_import
import sys
import time
import inspect
from numbers import Number
from datetime import datetime
from contextlib import contextmanager
from collections import OrderedDict, defaultdict
import numpy as np
try:
from tqdm import __version__ as tqdm_version
tqdm_version = int(tqdm_version.split(".")[0])
if tqdm_version < 4:
raise ImportError
from tqdm import tqdm as _tqdm
from tqdm._utils import _environ_cols_wrapper
except ImportError:
sys.stderr.write("[ERROR] Cannot import `tqdm` version >= 4.\n")
exit()
try:
import colorama
colorama.init()
from colorama import Fore as _Fore
_RED = _Fore.RED
_YELLOW = _Fore.YELLOW
_CYAN = _Fore.CYAN
_MAGENTA = _Fore.MAGENTA
_RESET = _Fore.RESET
except ImportError:
_RED, _YELLOW, _CYAN, _MAGENTA, _RESET = '', '', '', '', ''
_NUMBERS_CH = {
ord('0'): 0,
ord('1'): 1,
ord('2'): 2,
ord('3'): 3,
ord('4'): 4,
ord('5'): 5,
ord('6'): 6,
ord('7'): 7,
ord('8'): 8,
ord('9'): 9,
}
# ===========================================================================
# Helper
# ===========================================================================
_LAST_UPDATED_PROG = [None]
def add_notification(msg):
msg = _CYAN + "[%s]Notification:" % \
datetime.now().strftime('%d/%b-%H:%M:%S') + _RESET + msg + ''
_tqdm.write(msg)
class _FuncWrap(object):
def __init__(self, func, default_func=lambda x: x):
super(_FuncWrap, self).__init__()
if func is None:
func = default_func
assert inspect.isfunction(func), \
"Invalid function object of type: %s" % str(type(func))
self.func = func
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __getstate__(self):
import dill
return dill.dumps(self.func)
def __setstate__(self, states):
import dill
self.func = dill.loads(states)
def _default_dict_list_creator():
return defaultdict(list)
# ===========================================================================
# Progress bar
# ===========================================================================
class Progbar(object):
""" Comprehensive review of any progress, this object is
fully pickle-able, and can be used for storing history,
summaries and report of the progress as well.
Parameters
----------
target: int
total number of steps expected
interval: float
Minimum progress display update interval, in seconds.
keep: bool
whether to keep the progress bar when the epoch finished
print_report: bool
print updated report along with the progress bar for each update
print_summary: bool
print epoch summary after each epoch
count_func: call-able
      a function that takes the returned batch and returns an integer for updating
progress.
report_func: call-able
      a function that takes the returned batch and returns a collection of
      (key, value) pairs for constructing the report.
progress_func : call-able
for post-processing the return value during processing into
a number representing addition in the progress
name: str or None
specific name for the progress bar
Examples
--------
>>> import numpy as np
>>> from odin.utils import Progbar
>>> x = list(range(10))
>>> for i in Progbar(target=x):
... pass
Note
----
  Some special cases:
  * any report key containing "confusionmatrix" will be printed out using
`print_confusion`
* any report key
"""
FP = sys.stderr
def __init__(self, target, interval=0.08, keep=False,
print_progress=True, print_report=True, print_summary=False,
count_func=None, report_func=None, progress_func=None,
name=None):
self.__pb = None # tqdm object
    if isinstance(target, Number):
self.target = int(target)
self.__iter_obj = None
elif hasattr(target, '__len__'):
self.target = len(target)
self.__iter_obj = target
else:
raise ValueError("Unsupport for `target` type: %s" %
str(target.__class__))
self._seen_so_far = defaultdict(int) # mapping: epoch_idx -> seen_so_far
n = len(str(self.target))
self._counter_fmt = '(%%%dd/%%%dd)' % (n, n)
if name is None:
name = "Progress-%s" % datetime.utcnow()
self._name = name
# ====== flags ====== #
self.__interval = float(interval)
self.__keep = keep
self.print_progress = bool(print_progress)
self.print_report = bool(print_report)
self.print_summary = bool(print_summary)
# ====== for history ====== #
self._report = OrderedDict()
self._last_report = None
self._last_print_time = None
self._epoch_summarizer_func = {}
# ====== recording history ====== #
    # dictionary: {epoch_id: {key: [value1, value2, ...]}}
self._epoch_hist = defaultdict(_default_dict_list_creator)
self._epoch_summary = defaultdict(dict)
self._epoch_idx = 0
self._epoch_start_time = None
# ====== iter information ====== #
if self.__iter_obj is None and \
(count_func is not None or report_func is not None):
raise RuntimeError("`count_func` and `report_func` can only be used "
"when `target` is an iterator with specific length.")
#
self.__count_func = _FuncWrap(func=count_func,
default_func=lambda x: len(x))
self.__report_func = _FuncWrap(func=report_func,
default_func=lambda x: None)
# ====== check progress function ====== #
self._progress_func = _FuncWrap(func=progress_func,
default_func=lambda x: x)
# ====== other ====== #
self._labels = None # labels for printing the confusion matrix
# ==================== History management ==================== #
def __getitem__(self, key):
return self._report.__getitem__(key)
def __setitem__(self, key, val):
self._epoch_hist[self.epoch_idx][key].append(val)
return self._report.__setitem__(key, val)
def __delitem__(self, key):
return self._report.__delitem__(key)
def __iter__(self):
if self.__iter_obj is None:
raise RuntimeError("This Progbar cannot be iterated, "
"the set `target` must be iterable.")
for X in self.__iter_obj:
count = self.__count_func(X)
report = self.__report_func(X)
if report is not None:
for key, val in report:
self[key] = val
self.add(int(count))
yield X
del self.__iter_obj
del self.__count_func
del self.__report_func
# ==================== screen control ==================== #
@property
def epoch_idx(self):
return self._epoch_idx
@property
def nb_epoch(self):
return self._epoch_idx + 1
@property
def name(self):
return self._name
@property
def labels(self):
""" Special labels for printing the confusion matrix. """
return self._labels
@property
def history(self):
""" Return
    dictionary:
{epoch_id : {tensor_name0: [batch_return1, batch_return2, ...],
tensor_name1: [batch_return1, batch_return2, ...],
...},
1 : {tensor_name0: [batch_return1, batch_return2, ...],
tensor_name1: [batch_return1, batch_return2, ...],
...},
... }
Example
-------
>>> for epoch_id, results in task.history.items():
>>> for tensor_name, values in results.items():
>>> print(tensor_name, len(values))
"""
return self._epoch_hist
def get_report(self, epoch=-1, key=None):
if epoch < 0:
epoch = self.nb_epoch + epoch - 1
return self._epoch_hist[epoch] if key is None else \
self._epoch_hist[epoch][key]
def set_summarizer(self, key, fn):
""" Epoch summarizer is a function, searching in the
report for given key, and summarize all the stored values
of each epoch int
|
Yarrick13/hwasp
|
tests/wasp1/AllAnswerSets/3col_aggregates_1_5_enc2.test.py
|
Python
|
apache-2.0
| 14,386
| 0.001738
|
input = """
% Guess colours.
chosenColour(N,C) | notChosenColour(N,C) :- node(N), colour(C).
% At least one color per node.
:- #count{ C : chosenColour(X,C) } > 1, node(X).
:- #count{ C : chosenColour(X,C) } < 1, node(X).
% No two adjacent nodes have the same colour.
:- link(X,Y), X<Y, chosenColour(X,C), chosenColour(Y,C).
node(1).
node(2).
node(3).
node(4).
node(5).
link(1,2).
link(2,1).
link(1,3).
link(3,1).
link(2,3).
link(3,2).
link(3,5).
link(5,3).
link(4,5).
link(5,4).
colour(red0).
colour(green0).
colour(blue0).
"""
output = """
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colo
|
Anthony25/barython
|
barython/widgets/base.py
|
Python
|
bsd-3-clause
| 10,749
| 0.000558
|
#!/usr/bin/env python3
import fcntl
import logging
import os
import shlex
import subprocess
import threading
from barython.hooks import HooksPool
from barython.tools import splitted_sleep
logger = logging.getLogger("barython")
def protect_handler(handler):
def handler_wrapper(self, *args, **kwargs):
try:
if not self._refresh_lock.acquire(blocking=False):
return
result = handler(self, *args, **kwargs)
finally:
if self._lock_start:
try:
self._refresh_lock.release()
except RuntimeError:
pass
return result
return handler_wrapper
class Widget():
"""
Basic Widget
"""
#: cache the content after update
_content = None
_icon = None
_refresh = -1
@property
def content(self):
return self._content
@property
def icon(self):
return self._icon
@icon.setter
def icon(self, value):
self._icon = value
@property
def refresh(self):
if self._refresh == -1 and self.screens:
return min([screen.refresh for screen in self.screens])
else:
return max(0, self._refresh)
@refresh.setter
def refresh(self, value):
self._refresh = value
def decorate(self, text, fg=None, bg=None, padding=0, font=None, icon=None,
actions=None):
"""
Decorate a text with custom properties
:param fg: foreground
:param bg: background
:param padding: padding around the text
:param font: index of font to use
:param actions: dict of actions
"""
try:
joined_actions = "".join(
"%{{A{}:{}:}}".format(a, cmd) for a, cmd in actions.items()
)
except (TypeError, AttributeError):
joined_actions = ""
# if colors are reset in text, padding will not have the good colors
if padding and text:
padding_str = self.decorate(padding * " ", fg=fg, bg=bg, font=font)
else:
padding_str = ""
return (12*"{}").format(
joined_actions,
padding_str,
"%{{B{}}}".format(bg) if bg else "",
"%{{F{}}}".format(fg) if fg else "",
"%{{T{}}}".format(font) if font else "",
icon + " " if icon else "",
text,
"%{{T-}}".format(font) if font else "",
"%{F-}" if fg else "",
"%{B-}" if bg else "",
padding_str,
"%{A}" * len(actions) if actions else "",
)
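    # A minimal usage sketch (colours, padding and the click command are illustrative
    # only): Widget().decorate("CPU 42%", fg="#FFFFFF", bg="#005577", padding=1,
    # actions={1: "notify-send clicked"}) wraps the text in the %{A...}, %{B...} and
    # %{F...} markup blocks assembled above.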
def decorate_with_self_attributes(self, text, *args, **kwargs):
"""
Return self.decorate but uses self attributes for default values
"""
d_kwargs = {
"fg": self.fg, "bg": self.bg, "padding": self.padding,
"font": self.fonts[0] if self.fonts else None,
"actions": self.actions, **kwargs
}
for parameter, value in zip(("fg", "bg", "padding", "font", "actions"),
args):
d_kwargs[parameter] = value
return self.decorate(text, **d_kwargs)
def trigger_global_update(self, output=None, *args, **kwargs):
new_content = self.decorate_with_self_attributes(output)
self._update_screens(new_content)
@protect_handler
def handler(self, *args, **kwargs):
"""
To use with hooks
"""
with self._lock_update:
self.update()
splitted_sleep(self.refresh, stop=self._stop.is_set)
def organize_result(self, *args, **kwargs):
"""
Organize the info to show with the splitted infos received
Organize the panel without handling the decoration (fg, bg, etc…)
Override this method to change the way the info is printed
"""
result = "{} ".format(self.icon) if self.icon else ""
return result + "".join(*args, *kwargs.values())
def _update_screens(self, new_content):
"""
If content has changed, request the screen update
"""
if self._content != new_content:
self._content = new_content
for screen in self.screens:
threading.Thread(target=screen.update).start()
def continuous_update(self):
while not self._stop.is_set():
try:
self.update()
except Exception as e:
logger.error(e)
splitted_sleep(self.refresh, stop=self._stop.is_set)
def update(self):
pass
def propage_hooks_changes(self):
"""
Propage a change in the hooks pool
"""
if getattr(self, "screens", None):
for s in self.screens:
s.hooks.merge(self)
def start(self, *args, **kwargs):
self._stop.clear()
try:
if not self._lock_start.acquire(blocking=False):
return
if self.infinite:
self.continuous_update()
else:
self.update()
finally:
if self._lock_start:
try:
self._lock_start.release()
except RuntimeError:
pass
def stop(self):
self._stop.set()
def __init__(self, bg=None, fg=None, padding=0, fonts=None, icon="",
actions=None, refresh=-1, screens=None, infinite=False):
#: background for the widget
self.bg = bg
#: foreground for the widget
self.fg = fg
#: list of fonts index used
self.fonts = fonts if fonts is not None else tuple()
#: icon to use. Can be a string or a dict for some widgets, where icon
# will depend about the current value.
self._icon = icon
#: dictionnary of actions
self.actions = actions if actions is not None else dict()
#: padding
self.padding = padding
#: refresh rate
self.refresh = refresh
        #: screens linked. Used for callbacks
self.screens = screens if screens is not None else set()
#: pool of hooks
self.hooks = HooksPool(parent=self)
#: run in an infinite loop or not
self.infinite = infinite
#: event to stop the widget
self._stop = threading.Event()
        self._lock_start = threading.Condition()
self._lock_update = threading.Condition()
self._refresh_lock = threading.Semaphore(2)
class TextWidget(Widget):
text = ""
def update(self):
with self._lock_update:
new_content = self.decorate_with_self_attributes(
self.organize_result(self.text)
)
self._update_screens(new_content)
def start(self):
with self._lock_start:
self.update()
def __init__(self, text=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text = self.text if text is None else text
self.infinite = False
class SubprocessWidget(Widget):
"""
Run a subprocess in a loop
"""
_subscribe_subproc = None
_subproc = None
def _no_blocking_read(self, output):
"""
Set the output to be non blockant and read it
"""
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
result = output.read()
except:
result = b""
fcntl.fcntl(fd, fcntl.F_SETFL, fl)
return result
def _init_subprocess(self, cmd):
"""
Start cmd in a subprocess, and split it if needed
"""
if self._stop.is_set():
return None
if isinstance(cmd, str):
cmd = shlex.split(cmd)
logger.debug("Launching {}".format(" ".join(cmd)))
return subprocess.Popen(
cmd, stdout=subprocess.PIPE, shell=self.shell, env=self.env
)
def _init_subscribe_subproc(self):
process_dead = (
self._subscribe_subproc is None or
|
JARR-aggregator/JARR
|
newspipe/commands.py
|
Python
|
agpl-3.0
| 5,322
| 0.002067
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
from dateutil.relativedelta import relativedelta
from datetime import datetime, date
import click
from werkzeug.security import generate_password_hash
import newspipe.models
from newspipe.bootstrap import application, db
from newspipe.controllers import UserController, ArticleController
logger = logging.getLogger("commands")
@application.cli.command("db_empty")
def db_empty():
"Will drop every datas stocked in db."
with application.app_context():
newspipe.models.db_empty(db)
@application.cli.command("db_create")
def db_create():
"Will create the database from conf parameters."
with application.app_context():
try:
db.create_all()
except Exception as e:
print(e)
@application.cli.command("create_admin")
@click.option("--nickname", default="admin", help="Nickname")
@click.option("--password", default="password", help="Password")
def create_admin(nickname, password):
"Will create an admin user."
admin = {
"is_admin": True,
"is_api": True,
"is_active": True,
"nickname": nickname,
"pwdhash": generate_password_hash(password),
}
with application.app_context():
try:
UserController(ignore_context=True).create(**admin)
except Exception as e:
print(e)
@application.cli.command("delete_user")
@click.option("--user-id", required=True, help="Id of the user to delete.")
def delete_user(user_id=None):
"Delete the user with the id specified in the command line."
try:
user = UserController().delete(user_id)
print("User {} deleted".format(user.nickname))
except Exception as e:
print(e)
@application.cli.command("delete_inactive_users")
@click.option("--last-seen", default=6, help="Number of months since last seen.")
def delete_inactive_users(last_seen):
"Delete inactive users (inactivity is given in parameter and specified in number of months)."
filter = {}
filter["last_seen__lt"] = date.today() - relativedelta(months=last_seen)
users = UserController().read(**filter)
for user in users:
db.session.delete(user)
try:
print("Deleting user
|
{}...".format(user.nickname))
|
db.session.commit()
except:
db.session.rollback()
print("Inactive users deleted.")
@application.cli.command("disable_inactive_users")
@click.option("--last-seen", default=6, help="Number of months since last seen.")
def disable_inactive_users(last_seen):
"Disable inactive users (inactivity is given in parameter and specified in number of months)."
filter = {}
filter["last_seen__lt"] = date.today() - relativedelta(months=last_seen)
users = UserController().read(**filter)
for user in users:
user.is_active = False
user.is_public_profile = False
user.automatic_crawling = False
try:
print("Updating user {}...".format(user.nickname))
db.session.commit()
except:
db.session.rollback()
print("Inactive users disabled.")
@application.cli.command("delete_read_articles")
def delete_read_articles():
"Delete read articles (and not liked) retrieved since more than 60 days ago."
filter = {}
filter["user_id__ne"] = 1
filter["readed"] = True
filter["like"] = False
filter["retrieved_date__lt"] = date.today() - relativedelta(days=60)
articles = ArticleController().read(**filter).limit(5000)
for article in articles:
try:
db.session.delete(article)
db.session.commit()
except:
db.session.rollback()
print("Read articles deleted.")
@application.cli.command("fix_article_entry_id")
def fix_article_entry_id():
filter = {}
filter["entry_id"] = None
articles = ArticleController().read(**filter).limit(50)
for article in articles:
try:
article.entry_id = str(article.id)
db.session.commit()
except:
db.session.rollback()
@application.cli.command("fetch_asyncio")
@click.option("--user-id", default=None, help="Id of the user")
@click.option("--feed-id", default=None, help="If of the feed")
def fetch_asyncio(user_id=None, feed_id=None):
"Crawl the feeds with asyncio."
import asyncio
with application.app_context():
from newspipe.crawler import default_crawler
filters = {}
filters["is_active"] = True
filters["automatic_crawling"] = True
if None is not user_id:
filters["id"] = user_id
users = UserController().read(**filters).all()
try:
feed_id = int(feed_id)
except:
feed_id = None
loop = asyncio.get_event_loop()
queue = asyncio.Queue(maxsize=3, loop=loop)
producer_coro = default_crawler.retrieve_feed(queue, users, feed_id)
consumer_coro = default_crawler.insert_articles(queue, 1)
logger.info("Starting crawler.")
start = datetime.now()
loop.run_until_complete(asyncio.gather(producer_coro, consumer_coro))
end = datetime.now()
loop.close()
logger.info("Crawler finished in {} seconds.".format((end - start).seconds))
|
bearicc/python-wavelet-transform
|
mycwt.py
|
Python
|
agpl-3.0
| 1,948
| 0.010267
|
""" ------------------------------
bior2_6
cwt
by BEAR, 05/04/14
------------------------------ """
import scipy as sp
import numpy as np
from scipy.signal import convolve
#import pywt
_scale_max = 1024
_scale_max = int(2**(sp.ceil(sp.log2(_scale_max))))
tmp = np.loadtxt('bior2.6_1024.txt')
_x_bior2_6 = tmp[:,0]
_psi_bior2_6 = tmp[:,1]
#_, _psi_bior2_6, _, _, _x_bior2_6 = pywt.Wavelet('bior2.6').wavefun(sp.log2(_scale_max))
def bior2_6(length, width):
length = int(length)
width = int(width)
i = sp.arange(0, 13*width)
u = _psi_bior2_6[_scale_max*i/width]/sp.sqrt(width)
n = int(abs((length-width*13)/2))
if length > width*13:
u = sp.concatenate((u,sp.zeros(length-width*13)), axis=0)
u = sp.roll(u, n)
elif length < width*13:
u = u[n:n+length]
return u
def cwt(x, scales, wname, bplot=False):
coefs = sp.zeros((len(scales), len(x)))
for i in range(0, len(scales)):
if wname == 'bior2.6':
length = min(13*scales[i], len(x))
wavelet = bior2_6
        coefs[i-1, :] = convolve(x, wavelet(length, i), mode='same')
if bplot:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
plt.ion()
fig = plt.figure(num=None, figsize=(14,5), dpi=100, facecolor='w', edgecolor='k')
plt.clf()
gs = gridspec.GridSpec(3, 1)
ax1 = fig.add_subplot(gs[0,0])
ax1.plot(x,'b-')
ax2 = fig.add_subplot(gs[1:,0])
im = ax2.imshow(coefs[::-1,:], extent=[0, len(x), scales[0], scales[-1]], aspect='auto', cmap='jet')
ax2.invert_yaxis()
ax2.set_xlabel('t')
ax2.set_ylabel('scale')
l, b, w, h = ax2.get_position().bounds
cax = fig.add_axes([l+w+0.01, b, 0.02, h])
plt.colorbar(im, cax=cax)
plt.suptitle('cwt by python')
plt.draw()
plt.show(block=True)
return coefs
|
Vauxoo/stock-logistics-warehouse
|
stock_orderpoint_generator/wizard/__init__.py
|
Python
|
agpl-3.0
| 35
| 0
|
from . import orderpoint_generator
|
eschleicher/flask_shopping_list
|
venv/lib/python3.4/site-packages/argh/interaction.py
|
Python
|
mit
| 2,403
| 0.00125
|
# coding: utf-8
#
# Copyright © 2010—2014 Andrey Mikhaylenko and contributors
#
# This file is part of Argh.
#
# Argh is free software under terms of the GNU Lesser
#  General Public License version 3 (LGPLv3) as published by the Free
# Software Foundation. See the file README.rst for copying conditions.
#
"""
Interaction
~~~~~~~~~~~
"""
from argh.compat import text_type
from argh.io import safe_input
__all__ = ['confirm', 'safe_input']
def confirm(action, default=None, skip=False):
"""
A shortcut for typical confirmation prompt.
:param action:
        a string describing the action, e.g. "Apply changes". A question mark
will be appended.
:param default:
`bool` or `None`. Determines what happens when user hits :kbd:`Enter`
without typing in a choice. If `True`, default choice is "yes". If
`False`, it is "no". If `None` the prompt keeps reappearing until user
types in a choice (not necessarily acceptable) or until the number of
iteration reaches the limit. Default is `None`.
:param skip:
`bool`; if `True`, no interactive prompt is used and default choice is
returned (useful for batch mode). Default is `False`.
Usage::
def delete(key, silent=False):
item = db.get(Item, args.key)
if confirm('Delete '+item.title, default=True, skip=silent):
item.delete()
print('Item deleted.')
else:
print('Operation cancelled.')
Returns `None` on `KeyboardInterrupt` event.
"""
MAX_ITERATIONS = 3
if skip:
return default
else:
defaults = {
None: ('y','n'),
True: ('Y','n'),
False: ('y','N'),
}
y, n = defaults[default]
prompt = text_type('{action}? ({y}/{n})').format(**locals())
choice = None
try:
if default is None:
cnt = 1
while not choice and cnt < MAX_ITERATIONS:
choice = safe_input(prompt)
cnt += 1
else:
choice = safe_input(prompt)
except KeyboardInterrupt:
return None
if choice in ('yes', 'y', 'Y'):
return True
if choice in ('no', 'n', 'N'):
return False
if default is not None:
return default
return None
|
PythonSanSebastian/python-rtmbot
|
plugins/ep_volunteer/catch_all.py
|
Python
|
mit
| 38
| 0
|
def catch_all(data):
    print(data)
|
pigmonkey/django-wishlist
|
wishlist/urls.py
|
Python
|
bsd-3-clause
| 443
| 0.002257
|
from django.conf.urls.defaults import *
from wishlist.models import Item
from wishlist.views import add_item, delete_item, wishlist, bookmarklet
urlpatterns = patterns('',
(r'^add/$', add_item),
(r'^(?P<id>\d+)/delete/$', delete_item),
url(r'^tag/(?P<querytag>[^/]+)/$', view=wishlist, name="items_by_tag"),
(r'^bookmarklet/$', bookmarklet),
(r'^sort/(?P<sort_by>\w+)/(?P<sort>\w+)/$', wish
|
list),
(r'^$', wishlist),
)
|
bacaldwell/ironic
|
ironic/tests/unit/drivers/modules/ilo/test_vendor.py
|
Python
|
apache-2.0
| 9,258
| 0
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for vendor methods used by iLO modules."""
import mock
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import vendor as ilo_vendor
from ironic.drivers.modules import iscsi_deploy
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_ilo_info()
class VendorPassthruTestCase(db_base.DbTestCase):
def setUp(self):
super(VendorPassthruTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo',
driver_info=INFO_DICT)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia', spec_set=True,
autospec=True)
def test_boot_into_iso(self, setup_vmedia_mock, power_action_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.boot_into_iso(task, boot_iso_href='foo')
setup_vmedia_mock.assert_called_once_with(task, 'foo',
ramdisk_options=None)
power_action_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(ilo_vendor.VendorPassthru, '_validate_boot_into_iso',
spec_set=True, autospec=True)
def test_validate_boot_into_iso(self, validate_boot_into_iso_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = ilo_vendor.VendorPassthru()
vendor.validate(task, method='boot_into_iso', foo='bar')
validate_boot_into_iso_mock.assert_called_once_with(
vendor, task, {'foo': 'bar'})
def test__validate_boot_into_iso_invalid_state(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.AVAILABLE
self.assertRaises(
exception.InvalidStateRequested,
task.driver.vendor._validate_boot_into_iso,
task, {})
def test__validate_boot_into_iso_missing_boot_iso_href(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.MANAGEABLE
self.assertRaises(
exception.MissingParameterValue,
task.driver.vendor._validate_boot_into_iso,
task, {})
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
def test__validate_boot_into_iso_manage(self, validate_image_prop_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
info = {'boot_iso_href': 'foo'}
task.node.provision_state = states.MANAGEABLE
task.driver.vendor._validate_boot_into_iso(
task, info)
validate_image_prop_mock.assert_called_once_with(
task.context, {'image_source': 'foo'}, [])
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
def test__validate_boot_into_iso_maintenance(
self, validate_image_prop_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
info = {'boot_iso_href': 'foo'}
task.node.maintenance = True
task.driver.vendor._validate_boot_into_iso(
task, info)
validate_image_prop_mock.assert_called_once_with(
task.context, {'image_source': 'foo
|
'}, [])
@mock.patch.object(iscsi_deploy.VendorPassthru, 'continue_deploy'
|
,
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', autospec=True)
def test_continue_deploy(self,
func_update_boot_mode,
func_update_secure_boot_mode,
pxe_vendorpassthru_mock):
kwargs = {'address': '123456'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
task.driver.vendor.continue_deploy(task, **kwargs)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
pxe_vendorpassthru_mock.assert_called_once_with(
mock.ANY, task, **kwargs)
class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaAgentVendorInterfaceTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="agent_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='agent_ilo', driver_info=INFO_DICT)
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance',
spec_set=True, autospec=True)
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
def test_reboot_to_instance(self, func_update_secure_boot_mode,
func_update_boot_mode,
check_deploy_success_mock,
agent_reboot_to_instance_mock):
kwargs = {'address': '123456'}
check_deploy_success_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.reboot_to_instance(task, **kwargs)
check_deploy_success_mock.assert_called_once_with(
mock.ANY, task.node)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
agent_reboot_to_instance_mock.assert_called_once_with(
mock.ANY, task, **kwargs)
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance',
spec_set=True, autospec=True)
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', s
|
scott-w/pyne-django-tutorial
|
chatter/chatter/urls.py
|
Python
|
mit
| 1,113
| 0
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns(
'',
# Examples:
# url(r'^$', 'chatter.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'', include('chatter.base.urls')),
url(r'^admin/', include(admin.site.urls)),
)
# The documentation for authentication views can be found at:
# https://docs.djangoproject.com/en/1.7/topics/auth/default/#module-django.contrib.auth.views
urlpatterns += patterns(
'django.contrib.auth.views',
url(r'^lo
|
gin/$', 'login', name='login'),
url(r'^logout/$', 'logout_then_login', name='logout'),
url(r'^reset/$', 'password_reset', name='password_reset'),
url(r'^reset/done/$', 'password_reset_done', name='password_reset_done'),
url(
r'^reset/confirm/'
r'(?P<uidb64>[0-9A-Za-z_\-]+)/'
r'(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
'password_reset_confirm',
|
name='password_reset-confirm'),
url(
r'^reset/complete/$',
'password_reset_complete',
name='password_reset_complete'),
)
|
a25kk/newe
|
src/newe.sitecontent/newe/sitecontent/testing.py
|
Python
|
mit
| 1,963
| 0
|
# -*- coding: utf-8 -*-
"""Base module for unittesting."""
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import login
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone
|
.app.testing import setRoles
from plone.app.tes
|
ting import TEST_USER_ID
from plone.app.testing import TEST_USER_NAME
from plone.testing import z2
import unittest2 as unittest
class neweSitecontentLayer(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
"""Set up Zope."""
# Load ZCML
import newe.sitecontent
self.loadZCML(package=newe.sitecontent)
z2.installProduct(app, 'newe.sitecontent')
def setUpPloneSite(self, portal):
"""Set up Plone."""
# Install into Plone site using portal_setup
applyProfile(portal, 'newe.sitecontent:default')
# Login and create some test content
setRoles(portal, TEST_USER_ID, ['Manager'])
login(portal, TEST_USER_NAME)
portal.invokeFactory('Folder', 'folder')
# Commit so that the test browser sees these objects
portal.portal_catalog.clearFindAndRebuild()
import transaction
transaction.commit()
def tearDownZope(self, app):
"""Tear down Zope."""
z2.uninstallProduct(app, 'newe.sitecontent')
FIXTURE = neweSitecontentLayer()
INTEGRATION_TESTING = IntegrationTesting(
bases=(FIXTURE,), name="CkSitecontentLayer:Integration")
FUNCTIONAL_TESTING = FunctionalTesting(
bases=(FIXTURE,), name="CkSitecontentLayer:Functional")
class IntegrationTestCase(unittest.TestCase):
"""Base class for integration tests."""
layer = INTEGRATION_TESTING
class FunctionalTestCase(unittest.TestCase):
"""Base class for functional tests."""
layer = FUNCTIONAL_TESTING
|
airwoot/timeline-hack-core
|
app/resources.py
|
Python
|
mit
| 7,302
| 0.015338
|
from flask import current_app, redirect, url_for, request, session, flash, send_file
from flask.ext import restful
from flask.ext.login import login_required, current_user, login_user, logout_user
import twitter
from request_parsers import *
from datetime import datetime
from models import *
from rauth.service import OAuth1Service
from rauth.utils import parse_utf8_qsl
from twitter_helpers import TwitterUser
import controllers
import traceback
class TwitterAuth(restful.Resource):
def get(self):
twitter_auth_loader = OAuth1Service(
name='twitter',
consumer_key=current_app.config['CONSUMER_KEY'],
consumer_secret=current_app.config['CONSUMER_SECRET'],
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/'
)
oauth_callback = url_for('twittercallback', _external=True)
params = {'oauth_callback': oauth_callback}
auth_url = twitter_auth_loader.get_raw_request_token(params=params)
data = parse_utf8_qsl(auth_url.content)
session['twitter_oauth'] = (data['oauth_token'],
data['oauth_token_secret'])
return redirect(twitter_auth_loader.get_authorize_url(data['oauth_token'], **params))
class Login(restful.Resource):
def get(self):
return send_file('views/index.html')
#return current_app.send_static_file('views/login.html')
return {'status':'Welcome'}
class TwitterCallback(restful.Resource):
def get(self):
try:
print session
request_token, request_token_secret = session.pop('twitter_oauth')
twitter_auth_loader = OAuth1Service(
name='twitter',
consumer_key=current_app.config['CONSUMER_KEY'],
consumer_secret=current_app.config['CONSUMER_SECRET'],
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/'
)
if not 'oauth_token' in request.args:
print 'You did not authorize the request'
return redirect(url_for('index'))
try:
creds = {'request_token': request_token,
'request_token_secret': request_token_secret}
params = {'oauth_verifier': request.args['oauth_verifier']}
sess = twitter_auth_loader.get_auth_session(params=params, **creds)
print sess.access_token
except Exception, e:
flash('There was a problem logging into Twitter: ' + str(e))
return redirect(url_for('index'))
api = twitter.Api(
current_app.config['CONSUMER_KEY'],
current_app.config['CONSUMER_SECRET'],
sess.access_token,
sess.access_token_secret
)
u = api.VerifyCredentials()
user = User.objects(twitter_id = u.id).first()
if not user:
user = User(twitter_id = u.id, screen_name = u.screen_name, registered_on = datetime.now(), access_token = sess.access_token, access_token_secret = sess.access_token_secret)
user.save()
else:
user.update(set__access_token = sess.access_token, set__access_token_secret = sess.access_token_secret)
login_user(user)
# return controllers.get_logged_in_users_list(user)
return redirect('http://localhost:8000')
except Exception as e:
import traceback
print traceback.format_exc(e)
restful.abort(500, message = 'Internal Server Error.')
class MyLists(restful.Resource):
@login_required
def get(self):
#args = list_parser.parse_args()
#TODO also return subscribed lists
user = current_user
try:
return controllers.get_logged_in_users_list(user)
pass
except twitter.TwitterError as e:
if e.message[0]['code'] == 88:
restful.abort(404, message = 'Limit for your access token has reached. Be patient and see some of the popular timelines')
except Exception as e:
import traceback
print traceback.format_exc(e)
restf
|
ul.abort(500, message = 'internal server error.')
class CreateList(restful.
|
Resource):
@login_required
def get(self):
args = create_list_parser.parse_args()
user = current_user
try:
return controllers.create_list(user, args['screen_name'])
pass
except twitter.TwitterError as e:
if e.message[0]['code'] == 34:
restful.abort(404, message = 'Sorry user not found on twitter.')
elif e.message[0]['code'] == 88:
restful.abort(404, message = 'Limit for your access token has reached. You can create more timenlines later. Try some of the popular timelines for now.')
except Exception as e:
import traceback
print traceback.format_exc(e)
restful.abort(500, message = 'internal server error.')
class SubscribeList(restful.Resource):
@login_required
def get(self):
args = subscribe_list_parser.parse_args()
user = current_user
try:
return controllers.subscribe_list(user, args['list_id'], args['owner_id'])
except twitter.TwitterError as e:
if e.message[0]['code'] == 88:
restful.abort(404, message = 'Limit for your access token has reached. You may subscribe to interesting timelines later. Just enjoy popular timelines for now.')
except Exception as e:
import traceback
print traceback.format_exc(e)
restful.abort(500, message = 'internal server error.')
class DiscoverList(restful.Resource):
@login_required
def get(self):
args = discover_list_parser.parse_args()
try:
list_objs = list(TimelineList._get_collection().find({'exists' : True}).skip(args['skip']).limit(args['limit']))
map(lambda x:x.pop('_id'),list_objs)
return list_objs
except Exception as e:
import traceback
print traceback.format_exc(e)
restful.abort(500, message = 'internal server error.')
class ListTimeline(restful.Resource):
@login_required
def get(self):
args = list_timeline_parser.parse_args()
user = current_user
try:
return controllers.list_timeline(user, args['list_id'], args['owner_id'], args['since_id'], args['count'])
except twitter.TwitterError as e:
if e.message[0]['code'] == 34:
controllers.update_list_status(args['list_id'], exists = False)
restful.abort(404, message = 'Sorry page not found')
except Exception as e:
import traceback
print traceback.format_exc(e)
restful.abort(500, message = 'internal server error.')
|
MTG/dunya
|
hindustani/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 25,073
| 0.004706
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-19 15:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import hindustani.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('data', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('mbid', models.UUIDField(blank=True, null=True)),
('gender', models.CharField(blank=True, choices=[(b'M', b'Male'), (b'F', b'Female')], max_length=1, null=True)),
('begin', models.CharField(blank=True, max_length=10, null=True)),
('end', models.CharField(blank=True, max_length=10, null=True)),
('artist_type', models.CharField(choices=[(b'P', b'Person'), (b'G', b'Group')], default=b'P', max_length=1)),
('dummy', models.BooleanField(db_index=True, default=False)),
('description_edited', models.BooleanField(default=False)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('group_members', models.ManyToManyField(blank=True, related_name='groups', to='hindustani.Artist')),
('images', models.ManyToManyField(related_name='hindustani_artist_image_set', to='data.Image')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='ArtistAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=100)),
('primary', models.BooleanField(default=False)),
('locale', models.CharField(blank=True, max_length=10, null=True)),
('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Artist')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='Composer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('mbid', models.UUIDField(blank=True, null=True)),
('gender', models.CharField(blank=True, choices=[(b'M', b'Male'), (b'F', b'Female')], max_length=1, null=True)),
('begin', models.CharField(blank=True, max_length=10, null=True)),
('end', models.CharField(blank=True, max_length=10, null=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_composer_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_composer_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_composer_source_set', to='data.Source')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='ComposerAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=100)),
('primary', models.BooleanField(default=False)),
('locale', models.CharField(blank=True, max_length=10, null=True)),
('composer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Composer')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniS
|
tyle, models.Model),
),
migrations.CreateModel(
name='Form',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
|
('common_name', models.CharField(max_length=50)),
('uuid', models.UUIDField(db_index=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_form_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_form_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_form_source_set', to='data.Source')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FormAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Form')),
],
),
migrations.CreateModel(
name='Instrument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('percussion', models.BooleanField(default=False)),
('name', models.CharField(max_length=50)),
('mbid', models.UUIDField(blank=True, null=True)),
('hidden', models.BooleanField(default=False)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_instrument_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_instrument_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_instrument_source_set', to='data.Source')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='InstrumentPerformance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lead', models.BooleanField(default=False)),
('attributes', models.CharField(blank=True, max_length=200, null=True)),
('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Artist')),
('instrument', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hindustani.Instrument')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='Laya',
|
ascott1/regulations-site
|
regulations/tests/layers_paragraph_markers_tests.py
|
Python
|
cc0-1.0
| 1,212
| 0.00165
|
from unittest import TestCase
from mock import patch
from regulations.generator.layers.paragraph_markers import *
class ParagraphMarkersLayerTest(TestCase):
@patch('regulations.generator.layers.paragraph_markers.loader')
def test
|
_apply_layer(self, loader):
pml = ParagraphMarkersLayer({
'1001-12-a': [{'text': '(a)', 'locations': [0]}],
'1001-12-q': [{'text': 'q.', 'locations': [1]}]
})
self.assertEqual([], pml.apply_layer('1002-01-01'))
a = pml.apply_layer('1001-12-a')
self.assertEqual(1, len(a))
self.assertEqual('(a)', a[0][0])
self.assertEqual([0], a[0][2])
call_args = loader.get_template.r
|
eturn_value.render.call_args[0][0]
self.assertEqual('(a)', call_args['paragraph'])
self.assertEqual('a', call_args['paragraph_stripped'])
q = pml.apply_layer('1001-12-q')
self.assertEqual(1, len(q))
self.assertEqual('q.', q[0][0])
self.assertEqual([1], q[0][2])
call_args = loader.get_template.return_value.render.call_args[0][0]
self.assertEqual('q.', call_args['paragraph'])
self.assertEqual('q', call_args['paragraph_stripped'])
|
Guidobelix/pyload
|
module/plugins/crypter/LetitbitNetFolder.py
|
Python
|
gpl-3.0
| 1,221
| 0.015561
|
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.Crypter import Crypter
class LetitbitNetFolder(Crypter):
__name__ = "LetitbitNet"
__type__ = "crypter"
__version__ = "0.16"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?letitbit\.net/folder/\w+'
__config__ = [("activated" , "bool" , "Activated" , True ),
("use_premium" , "bool" , "Use premium account if available", True ),
("folder_per_package", "Default;Yes;No", "Create folder for each package" , "Default")]
__description__ = """Letitbit.net folder decrypter plugin"""
__license__ = "GPLv3
|
"
__authors__ = [("DHMH", "webmaster@pcProfil.de"),
("z00nx", "z00nx0@gmail.com")]
FOLDER_PATTERN = r'<table>(.*)</table>'
LINK_PATTERN = r'<a href="(.+?)" target="_blank">'
def decrypt(self, pyfile):
html = self.load(pyfile.url)
folder = re.search(self.FOLDER_PATTERN, html, re.S)
if folder is None:
self.error(_("FOLDER_PATTERN
|
not found"))
self.links.extend(re.findall(self.LINK_PATTERN, folder.group(0)))
|
llou/panopticon
|
panopticon/core/actions/service.py
|
Python
|
gpl-3.0
| 3,447
| 0.004062
|
# service.py is part of Panopticon.
# Panopticon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Panopticon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Panopticon. If not, see <http://www.gnu.org/licenses/>.
from panopticon.core.base import ServiceAttribute
from panopticon.core.actions.base import (ManagerDependantActionLauncher,
DependantAction, DecoratorAction, ActionManager)
class ServiceAction(ServiceAttribute, DependantAction):
_excluded_values_names = ["manager"]
def __init__(self, name=None, service=None, launcher=None):
super(ServiceAction, self).__init__(name=name, service=service)
DependantAction.__init__(self, launcher=launcher)
def check_required(self, action):
return action.service in self.service.required_services
def set_running_env(self, running_env):
running_env.action = self
running_env.service = self.service
def __repr__(self):
fields = []
if self.service is not None:
fields.append("service:'%s'" % self.service)
if self.name is not None:
fields.append("name:'%s'" % self.name)
return "<%s %s>" % (self.__class__.__name__, " ".join(fields))
def __str__(self):
if self.service is None:
return self.name
else:
return ".".join((str(self.service), self.name))
class ServiceDecoratorAction(DecoratorAction, ServiceAction):
def __init__(self, function, name=None, service=None, launcher=None):
super(ServiceDecoratorAction, self).__init__(function)
ServiceAction.__init__(self, name=name, service=service,
launcher=launcher)
service_action = ServiceDecoratorAction
class ServiceActionLauncher(ManagerDependantActionLauncher, ServiceAction):
def __init__(self, name=None, service=None, launcher=None):
super(ServiceActionLauncher, self).__init__(name, service.roles)
ServiceAction.__init__(self, name=name, service=service,
launcher=launcher)
def launch(self, *args, **kwargs):
super(ServiceActionLauncher, self).launch(*args, **kwargs)
class ServiceActionManager(ActionManager):
action_launcher_class = ServiceActionLauncher
_managed_obj_name = "service"
_manager_att
|
ribute_class = ServiceAction
def _get_base_dict(self):
servi
|
ce_action_class = self.action_launcher_class
actions = {}
defined_action_names = []
for aname, action in self.service._meta["actions"]:
defined_action_names.append(aname)
actions[aname] = action
for rname, role in self.service.roles:
for raname, action in role.actions:
if not raname in defined_action_names:
new_action = service_action_class(name=raname,
service=self.service)
actions[raname] = new_action
defined_action_names.append(raname)
return actions
|
shengqh/ngsperl
|
lib/SmallRNA/updateShortReadParentCount.py
|
Python
|
apache-2.0
| 7,328
| 0.019378
|
import argparse
import sys
import logging
import os
import csv
class ReadItem:
def __init__(self, sequence, totalCount):
self.Sequence = sequence
self.TotalCount = totalCount
self.SampleMap = {}
class AnnotationItem:
def __init__(self, sequence, totalCount, category, counts):
self.Sequence = sequence
self.TotalCount = totalCount
self.Categories = [category]
self.Counts = counts
def getValue(value):
return value.TotalCount
def getFilename(value):
return value[1]
def match(logger, input, names, annotated, maxMapped, maxNumber, minReadCount, minSampleCount, outputPrefix):
logger.info("Reading short reads:" + input + " ...")
shortReadMap = {}
shortReadFiles = []
shortFileList = []
with open(input, 'r') as sr:
for line in sr:
parts = line.rstrip().split('\t')
shortFileList.append(parts)
shortFileList = sorted(shortFileList, key=getFilename)
for parts in shortFileList:
sampleFile = parts[0]
sample = parts[1]
shortReadFiles.append(sample)
logger.info(" Reading " + sampleFile + " ...")
with open(sampleFile, 'r') as fin:
fin.readline()
for line in fin:
reads = line.rstrip().split('\t')
count = int(reads[1])
seq = reads[2].rstrip()
if not seq in shortReadMap:
ri = ReadItem(seq, count)
shortReadMap[seq] = ri
else:
ri = shortReadMap[seq]
ri.TotalCount += count
ri.SampleMap[sample] = count
if minSampleCount > 1 or minReadCount > 1:
shortReads
|
= []
for read in shortReadMap.values():
validSampleCount = len([v for v in read.SampleMap.values() if v >= minReadCount])
if validSampleCount >= minSampleCount:
shortReads.append(read)
else:
shortReads = shortReadMap.values()
shortReads = sorted(shortReads, key=getValue, reverse=True)
if len(shortReads) > maxNumber:
s
|
hortReads = shortReads[0:maxNumber]
logger.info("Reading max mapped reads:" + maxMapped + " ...")
maxmappedReads = {}
with open(maxMapped, 'r') as sr:
for line in sr:
parts = line.split('\t')
logger.info(" Reading " + parts[0] + " ...")
with open(parts[0], 'r') as fin:
while True:
qname = fin.readline().rstrip()
if not qname:
break
seq = fin.readline()
fin.readline()
fin.readline()
if qname.endswith("_"):
maxmappedReads[seq.rstrip()] = 1
cnames = names.split(",")
logger.info("Reading annotated reads:" + annotated + " ...")
annotatedReadMap = {}
annotatedFiles = []
with open(annotated, 'r') as annolist:
iIndex = -1
for row in annolist:
parts = row.split('\t')
annofile = parts[0]
iIndex = iIndex + 1
category = cnames[iIndex]
logger.info(" Reading " + annofile + " ...")
with open(annofile, 'r') as sr:
annotatedFiles = sr.readline().rstrip().split('\t')[1:]
for line in sr:
parts = line.rstrip().split('\t')
seq = parts[0]
if seq not in annotatedReadMap:
totalCount = sum(int(p) for p in parts[1:])
annotatedReadMap[seq] = AnnotationItem(seq, totalCount, category, parts[1:])
else:
annotatedReadMap[seq].Categories.append(category)
annotatedReads = sorted(annotatedReadMap.values(), key=getValue, reverse=True)
output = outputPrefix + ".tsv"
logger.info("Writing explain result:" + output + " ...")
with open(output, "w") as sw:
sw.write("ShortRead\tShortReadCount\tShortReadLength\t" + "\t".join(["SRS_" + f for f in shortReadFiles]) + "\tIsMaxMapped\tParentRead\tParentReadCount\tParentReadCategory\t" + "\t".join(["PRS_" + f for f in annotatedFiles]) + "\n")
emptyAnnotation = "\t\t\t\t" + "\t".join(["" for af in annotatedFiles]) + "\n"
for shortRead in shortReads:
shortSeq = shortRead.Sequence
shortSeqCount = shortRead.TotalCount
seqMap = shortRead.SampleMap
sw.write("%s\t%s\t%d" % (shortSeq, shortSeqCount, len(shortSeq)))
for fname in shortReadFiles:
if fname in seqMap:
sw.write("\t%s" % seqMap[fname])
else:
sw.write("\t0")
sw.write("\t" + str(shortSeq in maxmappedReads))
bFound = False
for annotatedRead in annotatedReads:
annoSeq = annotatedRead.Sequence
if shortSeq in annoSeq:
bFound = True
sw.write("\t%s\t%s\t%s\t%s\n" % (annoSeq, annotatedRead.TotalCount, "/".join(annotatedRead.Categories[0]), "\t".join(annotatedRead.Counts)))
break
if not bFound:
sw.write(emptyAnnotation)
logger.info("Done.")
def main():
parser = argparse.ArgumentParser(description="Matching short reads with annotated reads.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
DEBUG=False
NOT_DEBUG = not DEBUG
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input short reads', required=NOT_DEBUG)
parser.add_argument('-m', '--maxMapped', action='store', nargs='?', help='Input reads exceed maximum mapping to genome', required=NOT_DEBUG)
parser.add_argument('-a', '--annotated', action='store', nargs='?', help='Input annotated reads', required=NOT_DEBUG)
  parser.add_argument('-n', '--names', action='store', nargs='?', help='Input annotated reads categories, split by ","', required=NOT_DEBUG)
parser.add_argument('--maxNumber', action='store', default=100, nargs='?', help='Input number of top short reads for annotation')
parser.add_argument('--minReadCount', action='store', default=3, nargs='?', help='Input minimum copy of short reads in sample for annotation')
parser.add_argument('--minSampleCount', action='store', default=2, nargs='?', help='Input minimum number of sample with valid read count')
parser.add_argument('-o', '--output', action='store', nargs='?', default="-", help="Output prefix of matched reads file", required=NOT_DEBUG)
if NOT_DEBUG and len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if DEBUG:
args.input = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match__fileList1.list"
args.maxMapped = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match__fileList2.list"
args.annotated = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match__fileList3.list"
args.names = "Host miRNA,Host tRNA,Host snRNA,Host snoRNA,Host rRNA,Host other small RNA,Host Genome,Microbiome Bacteria,Environment Bacteria,Fungus,Non host tRNA,Non host rRNA"
#args.names = "Host miRNA,Host tRNA"
args.output = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match2"
logger = logging.getLogger('updateCount')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
match(logger, args.input, args.names, args.annotated, args.maxMapped, args.maxNumber, args.minReadCount, args.minSampleCount, args.output)
if __name__ == "__main__":
main()
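# Illustrative invocation (file names are placeholders, not taken from the repo):
#   python updateShortReadParentCount.py -i short_reads.list -m max_mapped.list \
#     -a annotated.list -n "Host miRNA,Host tRNA,Host rRNA" -o result/match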
|
zamattiac/SHARE
|
providers/edu/iowaresearch/__init__.py
|
Python
|
apache-2.0
| 1,863
| 0.000537
|
default_app_config = 'providers.edu.iowaresearch.apps.AppConfig'
"""
Example Record
<record>
<header>
<identifier>oai:ir.uiowa.edu:iwp_archive-1227</identifier>
<datestamp>2016-07-05T19:23:14Z</datestamp>
<setSpec>publication:iwp</setSpec>
<setSpec>publication:grad</setSpec>
<setSpec>publication:iwp_archive</setSpec>
<setSpec>publicati
|
on:harvest</setSpec>
<setSpec>publication:fullharvest</setSpec>
</header>
<metadata>
<oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-inst
|
ance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:title>Writing Sample</dc:title>
<dc:creator>Gamerro, Carlos</dc:creator>
<dc:description>
Excerpts from The Adventure of the Busts of Eva Perón and The Islands.
</dc:description>
<dc:date>2008-10-01T07:00:00Z</dc:date>
<dc:type>text</dc:type>
<dc:format>application/pdf</dc:format>
<dc:identifier>http://ir.uiowa.edu/iwp_archive/228</dc:identifier>
<dc:identifier>
http://ir.uiowa.edu/cgi/viewcontent.cgi?article=1227&context=iwp_archive
</dc:identifier>
<dc:rights>Copyright © 2008 Carlos Gamerro</dc:rights>
<dc:source>
International Writing Program Archive of Residents' Work
</dc:source>
<dc:language>eng</dc:language>
<dc:publisher>Iowa Research Online</dc:publisher>
</oai_dc:dc>
</metadata>
</record>
"""
|
daniele-athome/kontalk-legacy-xmppserver
|
kontalk/xmppserver/version.py
|
Python
|
gpl-3.0
| 967
| 0
|
# -*- coding: utf-8 -*-
"""Version information."""
"""
Kontalk XMPP server
Copyright (C) 2014 Kontalk Devteam <devteam@kontalk.org>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public
|
License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
NAME = "Kontalk XMPP server"
IDENTITY = "kontalk"
PACKAGE = "kontalk-xmppserver"
VERSION = "devel"
AUTHORS = (
{
"name": "Daniele Ricci",
|
"email": "daniele.athome@gmail.com"
},
)
|
camallen/aggregation
|
experimental/condor/presentation/condor_IBCC.py
|
Python
|
apache-2.0
| 9,662
| 0.00859
|
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import cPickle as pickle
import bisect
import random
import csv
import matplotlib.pyplot as plt
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
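# Illustrative behaviour of index(): index([1, 2, 4, 4, 7], 4) returns 2,
# while index([1, 2, 4, 7], 3) raises ValueError because 3 is not present.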
sys.path.append(base_directory+"/github/reduction/experimental/classifier")
sys.path.append(base_directory+"/github/pyIBCC/python")
import ibcc
from iterativeEM import IterativeEM
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['condor_2014-11-23']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
gold = pickle.load(open(base_directory+"/condor_gold.pickle","rb"))
gold.sort(key = lambda x:x[1])
to_sample_from = (zip(*gold)[0])[1301:]
sample = random.sample(to_sample_from,100)
big_userList = []
big_subjectList = []
animal_count = 0
f = open(base_directory+"/Databases/condor_ibcc.csv","wb")
f.write("a,b,c\n")
alreadyDone = []
subjectVote = {}
gold_condor = []
only_one = []
vote_list = []
for count,zooniverse_id in enumerate(sample):
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
if subject["classification_count"] < 3:
print "**"
only_one.append(zooniverse_id)
continue
print count
#gold standard
gold_classification = classification_collection.find_one({"user_name":"wreness", "subjects.zooniverse_id":zooniverse_id})
assert gold_classification["tutorial"] == False
found_condor = False
try:
mark_index = [ann.keys() for ann in gold_classification["annotations"]].index(["marks",])
markings = gold_classification["annotations"][mark_index].values()[0]
try:
for animal in markings.values():
animal_type = animal["animal"]
found_condor = (animal_type == "condor")
except KeyError:
continue
except ValueError:
pass
if found_condor:
gold_condor.append(1)
else:
gold_condor.append(0)
alreadyDone = []
classification_count = 0
for classification in classification_collection.find({"subjects.zooniverse_id":zooniverse_id}):
if "user_name" in classification:
user = classification["user_name"]
else:
user = classification["user_ip"]
#print user
if ("user_name" in classification) and (classification["user_name"] == "wreness"):
continue
if user in alreadyDone:
continue
classification_count += 1
if classification_count == 3:
break
alreadyDone.append(user)
if not(user in big_userList):
big_userList.append(user)
if not(zooniverse_id in big_subjectList):
big_subjectList.append(zooniverse_id)
user_index = big_userList.index(user)
subject_index = big_subjectList.index(zooniverse_id)
try:
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
found = False
for animal in markings.values():
animal_type = animal["animal"]
if animal_type in ["condor"]:
found = True
break
if found:
vote_list.append((user_index,subject_index,1))
f.write(str(user_index) + ","+str(subject_index) + ",1\n")
if not(zooniverse_id in subjectVote):
subjectVote[zooniverse_id] = [1]
else:
subjectVote[zooniverse_id].append(1)
else:
vote_list.append((user_index,subject_index,0))
f.write(str(user_index) + ","+str(subject_index) + ",0\n")
if not(zooniverse_id in subjectVote):
subjectVote[zooniverse_id] = [0]
else:
subjectVote[zooniverse_id].append(0)
except (ValueError,KeyError):
f.write(str(user_index) + ","+str(subject_index) + ",0\n")
if not(zooniverse_id in subjectVote):
subjectVote[zooniverse_id] = [0]
else:
subjectVote[zooniverse_id].append(0)
if classification_count == 0:
print subject
assert classification_count > 0
condor_count = 0.
total_count = 0.
false_positives = []
true_positives = []
false_negatives = []
true_negatives = []
confusion = [[0.,0.],[0.,0.]]
for votes in subjectVote.values():
if np.mean(votes) >= 0.5:
condor_count += 1
confusion[1][1] += np.mean(votes)
confusion[1][0] += 1 - np.mean(votes)
true_positives.append(np.mean(votes))
#false_negatives.append(1-np.mean(votes))
else:
#false_positives.append(np.mean(votes))
true_negatives.append(1-np.mean(votes))
confusion[0][0] += 1 - np.mean(votes)
confusion[0][1] += np.mean(votes)
total_count += 1
pp = condor_count / total_count
print confusion
confusion = [[max(int(confusion[0][0]),1),max(int(confusion[0][1]),1)],[max(int(confusion[1][0]),1),max(int(confusion[1][1]),1)]]
print confusion
print pp
f.close()
with open(base_directory+"/Databases/condor_ibcc.py","wb") as f:
f.write("import numpy as np\n")
f.write("scores = np.array([0,1])\n")
f.write("nScores = len(scores)\n")
f.write("nClasses = 2\n")
f.write("inputFile = \""+base_directory+"/Databases/condor_ibcc.csv\"\n")
f.write("outputFile = \""+base_directory+"/Databases/condor_ibcc.out\"\n")
f.write("confMatFile = \""+base_directory+"/Databases/condor_ibcc.mat\"\n")
f.write("nu0 = np.array(["+str(int((1-pp)*100))+","+str(int(pp*100))+"])\n")
f.write("alpha0 = np.array("+str(confusion)+")\n")
#f.write("alpha0 = np.array([[185,1],[6,52]])\n")
#f.write("alpha0 = np.array([[3,1],[1,3]])\n")
#start by removing all temp files
try:
os.remove(base_directory+"/Databases/condor_ibcc.out")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/condor_ibcc.mat")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/condor_ibcc.csv.dat")
except OSError:
pass
#pickle.dump((big_subjectList,big_userList),open(base_directory+"/Databases/tempOut.pickle","wb"))
ibcc.runIbcc(base_directory+"/Databases/condor_ibcc.py")
values = []
errors = 0
low = 0
X_positive = []
X_negative = []
with open(base_directory+"/Databases/condor_ibcc.out","rb") as f:
ibcc_results = csv.reader(f, delimiter=' ')
for ii,row in enumerate(ibcc_results):
if ii == 20000:
break
wreness_condor = gold_condor[ii]
ibcc_condor = float(row[2])
if wreness_condor == 0:
X_negative.append(ibcc_condor)
else:
X_positive.append(ibcc_condor)
#print X_negative
# print X_positive
# plt.hist([X_positive,X_negative],10)
# plt.show()
alpha_list = X_negative[:]
alpha_list.extend(X_positive)
alpha_list.sort()
roc_X = []
roc_Y = []
for alpha in alpha_list:
positive_count = sum([1 for x in X_positive if x >= alpha])
positive_rate = positive_count/float(len(X_positive))
negative_count
|
= sum([1 for x in X_negative if x >= alpha])
negative_rate = negative_count/float(len(X_negative))
roc_X.append(negative_rate)
roc_Y.append(positive_rate)
#print roc_X
plt.plot(roc_X,roc_Y,color="red")
X_positive = []
X_negative = []
#repeat with MV
for subject_index,zooniverse_id in enumerate(big_subjectList):
votes = subjectVote[zooniverse_id]
wreness_c
|
ondor = gold_condor[subject_index]
if wreness_condor == 0:
X_negative.append(np.mean(votes))
else:
X_positive.append(np.mean(votes))
alpha_list = X_negative[:]
alpha_list.extend(X_posi
|
sunlightlabs/wikipedia-dump-tools
|
wikitools/__init__.py
|
Python
|
gpl-3.0
| 71
| 0
|
# -*- coding: utf-8 -*-
from exceptions
|
imp
|
ort DropPage, AbortProcess
|
DreamSourceLab/DSView
|
libsigrokdecode4DSL/decoders/1-spi/pd.py
|
Python
|
gpl-3.0
| 13,821
| 0.002532
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2011 Gareth McMullin <gareth@blacksphere.co.nz>
## Copyright (C) 2012-2014 Uwe Hermann <uwe@hermann-uwe.de>
## Copyright (C) 2019 DreamSourceLab <support@dreamsourcelab.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from collections import namedtuple
Data = namedtuple('Data', ['ss', 'es', 'val'])
'''
OUTPUT_PYTHON format:
Packet:
[<ptype>, <data1>, <data2>]
<ptype>:
- 'DATA': <data1> contains the MOSI data, <data2> contains the MISO data.
The data is _usually_ 8 bits (but can also be fewer or more bits).
Both data items are Python numbers (not strings), or None if the respective
channel was not supplied.
- 'BITS': <data1>/<data2> contain a list of bit values in this MOSI/MISO data
item, and for each of those also their respective start-/endsample numbers.
- 'CS-CHANGE': <data1> is the old CS# pin value, <data2> is the new value.
Both data items are Python numbers (0/1), not strings. At the beginning of
the decoding a packet is generated with <data1> = None and <data2> being the
initial state of the CS# pin or None if the chip select pin is not supplied.
- 'TRANSFER': <data1>/<data2> contain a list of Data() namedtuples for each
byte transferred during this block of CS# asserted time. Each Data() has
fields ss, es, and val.
Examples:
['CS-CHANGE', None, 1]
['CS-CHANGE', 1, 0]
['DATA', 0xff, 0x3a]
['BITS', [[1, 80, 82], [1, 83, 84], [1, 85, 86], [1, 87, 88],
[1, 89, 90], [1, 91, 92], [1, 93, 94], [1, 95, 96]],
[[0, 80, 82], [1, 83, 84], [0, 85, 86], [1, 87, 88],
[1, 89, 90], [1, 91, 92], [0, 93, 94], [0, 95, 96]]]
['DATA', 0x65, 0x00]
['DATA', 0xa8, None]
['DATA', None, 0x55]
['CS-CHANGE', 0, 1]
['TRANSFER', [Data(ss=80, es=96, val=0xff), ...],
[Data(ss=80, es=96, val=0x3a), ...]]
'''
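# A minimal sketch (not part of this file) of how a decoder stacked on top of
# this SPI decoder might consume the OUTPUT_PYTHON packets described above.
# The class body and handling shown are illustrative assumptions only:
#
#     class Decoder(srd.Decoder):
#         api_version = 3
#         inputs = ['spi']
#         ...
#         def decode(self, ss, es, data):
#             ptype = data[0]
#             if ptype == 'CS-CHANGE':
#                 old_cs, new_cs = data[1], data[2]
#             elif ptype == 'DATA':
#                 mosi, miso = data[1], data[2]   # either may be None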
# Key: (CPOL, CPHA). Value: SPI mode.
# Clock polarity (CPOL) = 0/1: Clock is low/high when inactive.
# Clock phase (CPHA) = 0/1: Data is valid on the leading/trailing clock edge.
spi_mode = {
(0, 0): 0, # Mode 0
(0, 1): 1, # Mode 1
(1, 0): 2, # Mode 2
(1, 1): 3, # Mode 3
}
class ChannelError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = '1:spi'
name = '1:SPI'
longname = 'Serial Peripheral Interface'
desc = 'Full-duplex, synchronous, serial bus.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['spi']
tags = ['Embedded/industrial']
channels = (
{'id': 'clk', 'type': 0, 'name': 'CLK', 'desc': 'Clock'},
)
optional_channels = (
{'id': 'miso', 'type': 107, 'name': 'MISO', 'desc': 'Master in, slave out'},
{'id': 'mosi', 'type': 109, 'name': 'MOSI', 'desc': 'Master out, slave in'},
{'id': 'cs', 'type': -1, 'name': 'CS#', 'desc': 'Chip-select'},
)
options = (
{'id': 'cs_polarity', 'desc': 'CS# polarity', 'default': 'active-low',
'values': ('active-low', 'active-high')},
{'id': 'cpol', 'desc': 'Clock polarity (CPOL)', 'default': 0,
'values': (0, 1)},
{'id': 'cpha', 'desc': 'Clock phase (CPHA)', 'default': 0,
'values': (0, 1)},
{'id': 'bitorder', 'desc': 'Bit order',
'default': 'msb-first', 'values': ('msb-first', 'lsb-first')},
{'id': 'wordsize', 'desc': 'Word size', 'default': 8,
'values': tuple(range(5,129,1))},
{'id': 'frame', 'desc': 'Frame Decoder', 'default': 'no',
'values': ('yes', 'no')},
)
annotations = (
('106', 'miso-data', 'MISO data'),
('108', 'mosi-data', 'MOSI data'),
('207', 'miso-bits', 'MISO bits'),
('209', 'mosi-bits', 'MOSI bits'),
('1000', 'warnings', 'Human-readable warnings'),
('6', 'miso-transfer', 'MISO transfer'),
('8', 'mosi-transfer', 'MOSI transfer'),
)
annotation_rows = (
('miso-bits', 'MISO bits', (2,)),
('miso-data', 'MISO data', (0,)),
('miso-transfer', 'MISO transfer', (5,)),
('mosi-bits', 'MOSI bits', (3,)),
('mosi-data', 'MOSI data', (1,)),
('mosi-transfer', 'MOSI transfer', (6,)),
('other', 'Other', (4,)),
)
binary = (
('miso', 'MISO'),
('mosi', 'MOSI'),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.bitcount = 0
self.misodata = self.mosidata = 0
|
self.misobits = []
self.mosibits = []
self.misobytes = []
self.mosibytes = []
self.ss_block = -1
self.samplenum = -1
self.ss_transfer = -1
self.cs_was_deasserted = False
self.have_cs = self.have_miso = self.have_mosi = None
def start(self):
self.out_python = self.register(srd.OUTPUT_PYTHON)
self.out_ann = self.register(
|
srd.OUTPUT_ANN)
self.out_binary = self.register(srd.OUTPUT_BINARY)
self.out_bitrate = self.register(srd.OUTPUT_META,
meta=(int, 'Bitrate', 'Bitrate during transfers'))
self.bw = (self.options['wordsize'] + 7) // 8
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
def putw(self, data):
self.put(self.ss_block, self.samplenum, self.out_ann, data)
def putdata(self, frame):
# Pass MISO and MOSI bits and then data to the next PD up the stack.
so = self.misodata if self.have_miso else None
si = self.mosidata if self.have_mosi else None
so_bits = self.misobits if self.have_miso else None
si_bits = self.mosibits if self.have_mosi else None
if self.have_miso:
ss, es = self.misobits[-1][1], self.misobits[0][2]
bdata = so.to_bytes(self.bw, byteorder='big')
self.put(ss, es, self.out_binary, [0, bdata])
if self.have_mosi:
ss, es = self.mosibits[-1][1], self.mosibits[0][2]
bdata = si.to_bytes(self.bw, byteorder='big')
self.put(ss, es, self.out_binary, [1, bdata])
self.put(ss, es, self.out_python, ['BITS', si_bits, so_bits])
self.put(ss, es, self.out_python, ['DATA', si, so])
if frame:
if self.have_miso:
self.misobytes.append(Data(ss=ss, es=es, val=so))
if self.have_mosi:
self.mosibytes.append(Data(ss=ss, es=es, val=si))
# Bit annotations.
if self.have_miso:
for bit in self.misobits:
self.put(bit[1], bit[2], self.out_ann, [2, ['%d' % bit[0]]])
if self.have_mosi:
for bit in self.mosibits:
self.put(bit[1], bit[2], self.out_ann, [3, ['%d' % bit[0]]])
# Dataword annotations.
if self.have_miso:
self.put(ss, es, self.out_ann, [0, ['%02X' % self.misodata]])
if self.have_mosi:
self.put(ss, es, self.out_ann, [1, ['%02X' % self.mosidata]])
def reset_decoder_state(self):
self.misodata = 0 if self.have_miso else None
self.mosidata = 0 if self.have_mosi else None
self.misobits = [] if self.have_miso else None
self.mosibits = [] if self.have_mosi else None
self.bitcount = 0
def cs_asserted(self, cs):
active_low = (self.options['cs_polarity'] == 'active-low')
return (cs == 0) if active_low else (cs == 1)
def handle_bit(self, miso, mosi, clk, cs, frame):
# If this is the first bit of a dataword, save its sa
|
cycladesnz/chambersAndCreatures
|
src/effects/dv_effects.py
|
Python
|
gpl-2.0
| 3,581
| 0.006981
|
import copy
from pdcglobal import *
from .effect import Effect
import dungeon
class FloatingEyeGazeEffect(Effect):
def __init__(self, host, owner):
dur = d(10)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Stuns the enemy'
def tick(self):
self.host.timer += 1000
if self.host == self.host.game.player:
self.host.game.shout('You are stunned by the Floating
|
Eye`s gaze!')
else:
self.host.game.shout('%s is stu
|
nned by the Floating Eye`s gaze!' % (self.host.name))
Effect.tick(self)
class AcidSplatterEffect(Effect):
notrigger=[]
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
actors = owner.game.get_all_srd_actors(owner.pos())
for act in actors:
Effect.__init__(self, dur, act, owner)
weaponinfotext = 'Splatters the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_ACID, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are splashed by acid!')
else:
self.host.game.shout('%s is splashed by acid!' % (self.host.name))
Effect.tick(self)
class FrostEffect(Effect):
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Freezes the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_COLD, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are freezing!')
else:
self.host.game.shout('%s is freezing!' % (self.host.name))
Effect.tick(self)
class HeatEffect(Effect):
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Burns the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_FIRE, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are getting burned!')
else:
self.host.game.shout('%s is getting burned!' % (self.host.name))
Effect.tick(self)
class SplitEffect(Effect):
notrigger=[]
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'You should not read this'
def tick(self):
new_pos = self.host.game.get_free_adj(self.owner.pos())
if new_pos != None:
self.owner.game.shout('%s splits in half!' % (self.owner.name))
new = dungeon.Populator.create_creature(self.owner.pop_name, self.owner.filename)
new.set_pos(new_pos)
new.game.add_actor(new)
self.owner.health = self.owner.health / 2 + 1
self.owner.cur_health = self.owner.cur_health / 2 + 1
new.health = self.owner.health
new.cur_health = self.owner.cur_health
new.xp_value = self.owner.xp_value / 3 + 2
Effect.tick(self)
class DazzleEffect(Effect):
def __init__(self, host, owner):
dur = d(4)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Blinds the enemy'
def tick(self):
self.host.dazzled = True
if self.host == self.host.game.player:
self.host.game.shout('You are blinded!')
else:
self.host.game.shout('%s is blinded!' % (self.host.name))
Effect.tick(self)
|
dchaplinsky/declarations.com.ua
|
declarations_site/catalog/elastic_models.py
|
Python
|
mit
| 66,029
| 0.001889
|
import re
import os.path
from operator import or_
from functools import reduce
from datetime import date
import logging
import urllib.parse
from django.conf import settings
from django.urls import reverse
from django.db.models.functions import ExtractYear
from django.db.models import Sum, Count
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _, get_language
from elasticsearch_dsl import (
DocType,
Object,
Keyword,
MetaField,
Text,
Completion,
Nested,
Date,
Boolean,
Search,
Double,
Index,
analyzer,
tokenizer,
)
from elasticsearch_dsl.query import Q
import jmespath
from procurements.models import Transactions
from .constants import (
CATALOG_INDICES,
BANK_EDRPOUS,
INCOME_TYPES,
MONETARY_ASSETS_TYPES,
OLD_DECLARATION_INDEX,
NACP_DECLARATION_INDEX,
NACP_DECLARATION_NEW_FORMAT_INDEX,
NUMBER_OF_SHARDS,
NUMBER_OF_REPLICAS,
NACP_SELECTORS_TO_TRANSLATE,
PAPER_SELECTORS_TO_TRANSLATE,
)
from .utils import parse_fullname, blacklist
from .templatetags.catalog import parse_ra
|
w_family_string
from .converters import PaperToNACPConverter, ConverterError
from .translator import HTMLTranslator
logger = logging.getLogger(__name__)
|
class NoneAwareDate(Date):
"""Elasticsearch DSL Date field chokes on None values and parses empty
strings as current date, hence the workaround.
TODO: move this upstream in some form."""
def _to_python(self, data):
if data is None:
return data
return super(NoneAwareDate, self)._to_python(data)
namesAutocompleteAnalyzer = analyzer(
"namesAutocompleteAnalyzer",
tokenizer=tokenizer(
"autocompleteTokenizer",
type="edge_ngram",
min_gram=1,
max_gram=25,
token_chars=["letter", "digit"],
),
filter=["lowercase"],
)
namesAutocompleteSearchAnalyzer = analyzer(
"namesAutocompleteSearchAnalyzer", tokenizer=tokenizer("whitespace"), filter=["lowercase"]
)
class AbstractDeclaration(object):
def infocard(self):
        raise NotImplementedError()
    def raw_source(self):
        raise NotImplementedError()
    def unified_source(self):
        raise NotImplementedError()
    def related_entities(self):
        raise NotImplementedError()
    @property
    def original_url(self):
        raise NotImplementedError()
    def _is_change_form(self):
        raise NotImplementedError
def related_documents(self):
return [
document.api_response(fields=["related_entities", "guid", "aggregated_data"])
for document in self.similar_declarations(limit=100)
if not document._is_change_form()
]
def guid(self):
return self.meta.id
def extra_phrases(self):
return [
self.general.post.post,
self.general.post.office,
self.general.post.region,
getattr(self.general.post, "actual_region", ""),
self.intro.doc_type,
]
def prepare_translations(self, language, infocard_only=False):
assert self.CONTENT_SELECTORS, "You should define CONTENT_SELECTORS first"
if language == "en":
extra_phrases = self.extra_phrases()
if infocard_only:
self.translator = HTMLTranslator(html=None, selectors=[], extra_phrases=extra_phrases)
else:
self.translator = HTMLTranslator(
html=self.raw_html(),
selectors=self.CONTENT_SELECTORS,
extra_phrases=extra_phrases,
)
def raw_en_html(self):
assert hasattr(self, "translator"), "You should call prepare_translations first"
return self.translator.get_translated_html()
def _name_search_query(self):
name = "{} {} {}".format(self.general.last_name, self.general.name, self.general.patronymic).strip()
return urllib.parse.quote(name)
def _full_name(self, language):
name = "{} {} {}".format(self.general.last_name, self.general.name, self.general.patronymic).strip()
if language == "en":
assert hasattr(self, "translator"), "You should call prepare_translations first"
phrase = self.translator.translate(name, just_transliterate=True)
return phrase["translation"]
else:
return name
def _translate_one_field(self, field, language):
if field:
if language == "en":
assert hasattr(self, "translator"), "You should call prepare_translations first"
phrase = self.translator.translate(field)
return phrase["translation"]
else:
return field
else:
return ""
def _position(self, language):
return self._translate_one_field(self.general.post.post, language)
def _office(self, language):
return self._translate_one_field(self.general.post.office, language)
def _region(self, language):
return self._translate_one_field(self.general.post.region, language)
def _actual_region(self, language):
return self._translate_one_field(self.general.post.actual_region, language)
def _declaration_type(self, language):
return self._translate_one_field(self.intro.doc_type, language)
def api_response(self, fields=None):
all_fields = ["guid", "infocard", "raw_source", "unified_source", "related_entities"]
if fields is None:
fields = all_fields
else:
fields = [f for f in fields if f in set(all_fields + ["guid", "aggregated_data", "related_documents"])]
return {f: getattr(self, f)() for f in fields}
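# Illustrative note (not from the original file): api_response() simply calls
# each requested zero-argument method, so e.g. d.api_response(["guid", "infocard"])
# would return {"guid": ..., "infocard": ...}; names outside the allowed set are
# silently dropped.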
def similar_declarations(self, language=None, limit=12):
res = {"exact": [], "maybe": []}
if getattr(self.intro, "user_declarant_id", None):
index = OLD_DECLARATION_INDEX
res["exact"] = (
NACPDeclaration.search()
.filter("term", **{"intro.user_declarant_id": self.intro.user_declarant_id})
.query(~Q("term", _id=self.meta.id))
.sort("-intro.doc_type")
)
else:
index = CATALOG_INDICES
fields = [
"general.last_name",
"general.name",
"general.patronymic",
"general.full_name",
]
res["maybe"] = (
Search(index=index)
.query(
"multi_match",
query=self.general.full_name,
operator="and",
fields=fields,
)
.query(~Q("term", _id=self.meta.id))
)
for k, s in res.items():
if not s:
continue
s = s.doc_type(NACPDeclaration, Declaration)
if k == "maybe":
s = s[:limit]
else:
s = s[:30]
res[k] = s.execute()
if language is not None:
for d in res[k]:
d.prepare_translations(language, infocard_only=True)
return res
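# Illustrative note (not part of the original file): "exact" collects documents
# filed under the same NACP declarant id (e.g. corrected forms of one
# declaration), while "maybe" is a fuzzy full-name multi_match across the
# catalog indices; both exclude the current document via ~Q("term", _id=...).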
def family_declarations(self, language=None, limit=12, return_full_body=False):
def filter_silly_names(name):
if not name:
return False
last_name, first_name, patronymic = parse_fullname(name)
if len(first_name) == 1 or first_name.endswith("."):
return False
if len(patronymic) == 1 or patronymic.endswith("."):
return False
return True
s = Search(index=CATALOG_INDICES)
family_members = self.get_family_members()
subqs = []
for name in filter(filter_silly_names, family_members):
subqs.append(
Q(
"multi_match",
query=name,
operator="and",
fields=[
"general.last_name",
"general.name",
"general.patronymic",
"general.full_name",
|
lnielsen/invenio | invenio/modules/messages/models.py | Python | gpl-2.0 | 8,905 | 0.007748
|
# -*- coding: utf-8 -*-
#
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebMessage database models.
"""
# General imports
from invenio.base.globals import cfg
from invenio.ext.sqlalchemy import db
# Create your models here.
from string import strip
from invenio.modules.accounts.models import User, Usergroup
from sqlalchemy.ext.associationproxy import association_proxy
class MsgMESSAGE(db.Model):
"""Represents a MsgMESSAGE record."""
def __str__(self):
return "From: %s<%s>, Subject: <%s> %s" % \
(self.user_from.nickname or _('None'),
self.user_from.email or _('unknown'),
self.subject, self.body)
__tablename__ = 'msgMESSAGE'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True,
autoincrement=True)
id_user_from = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(User.id),
nullable=True, server_default='0')
_sent_to_user_nicks = db.Column(db.Text, name='sent_to_user_nicks',
nullable=False)
_sent_to_group_names = db.Column(db.Text, name='sent_to_group_names',
nullable=False)
subject = db.Column(db.Text, nullable=False)
body = db.Column(db.Text, nullable=True)
sent_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00') # db.func.now() -> 'NOW()'
received_date = db.Column(db.DateTime,
server_default='1900-01-01 00:00:00')
user_from = db.relationship(User, backref='sent_messages')
#recipients = db.relationship(User,
# secondary=lambda: UserMsgMESSAGE.__table__,
# collection_class=set)
recipients = association_proxy('sent_to_users', 'user_to',
creator=lambda u:UserMsgMESSAGE(user_to=u))
@db.hybrid_property
def sent_to_user_nicks(self):
""" Alias for column 'sent_to_user_nicks'. """
return self._sent_to_user_nicks
@db.hybrid_property
def sent_to_group_names(self):
""" Alias for column 'sent_to_group_names'. """
return self._sent_to_group_names
@db.validates('_sent_to_user_nicks')
def validate_sent_to_user_nicks(self, key, value):
user_nicks = filter(len, map(strip,
value.split(cfg['CFG_WEBMESSAGE_SEPARATOR'])))
assert len(user_nicks) == len(set(user_nicks))
if len(user_nicks) > 0:
assert len(user_nicks) == \
User.query.filter(User.nickname.in_(user_nicks)).count()
return cfg['CFG_WEBMESSAGE_SEPARATOR'].join(user_nicks)
@db.validates('_sent_to_group_names')
def validate_sent_to_group_names(self, key, value):
group_names = filter(len, map(strip,
value.split(cfg['CFG_WEBMESSAGE_SEPARATOR'])))
assert len(group_names) == len(set(group_names))
if len(group_names) > 0:
assert len(group_names) == \
Usergroup.query.filter(Usergroup.name.in_(group_names)).count()
return cfg['CFG_WEBMESSAGE_SEPARATOR'].join(group_names)
@sent_to_user_nicks.setter
def sent_to_user_nicks(self, value):
old_user_nicks = self.user_nicks
self._sent_to_user_nicks = value
to_add = set(self.user_nicks)-set(old_user_nicks)
to_del = set(old_user_nicks)-set(self.user_nicks)
if len(self.group_names):
to_del = to_del-set([u.nickname for u in User.query.\
join(User.usergroups).filter(
Usergroup.name.in_(self.group_names)).\
all()])
if len(to_del):
is_to_del = lambda u: u.nickname in to_del
remove_old = filter(is_to_del, self.recipients)
for u in remove_old:
self.recipients.remove(u)
if len(to_add):
for u in User.query.filter(User.nickname.\
in_(to_add)).all():
if u not in self.recipients:
self.recipients.append(u)
@sent_to_group_names.setter
def sent_to_group_names(self, value):
old_group_names = self.group_names
self._sent_to_group_names = value
groups_to_add = set(self.group_names)-set(old_group_names)
groups_to_del = set(old_group_names)-set(self.group_names)
if len(groups_to_del):
to_del = set([u.nickname for u in User.query.\
join(User.usergroups).filter(
Usergroup.name.in_(groups_to_del)).\
all()])-set(self.user_nicks)
is_to_del = lambda u: u.nickname in to_del
remove_old = filter(is_to_del, self.recipients)
for u in remove_old:
self.recipients.remove(u)
if len(groups_to_add):
for u in User.query.join(User.usergroups).filter(db.and_(
Usergroup.name.in_(groups_to_add),
db.not_(User.nickname.in_(self.user_nicks)))).all():
if u not in self.recipients:
self.recipients.append(u)
@property
def user_nicks(self):
if not self._sent_to_user_nicks:
return []
return filter(len, map(strip,
self._sent_to_user_nicks.split(cfg['CFG_WEBMESSAGE_SEPARATOR'])))
@property
def group_names(self):
if not self._sent_to_group_names:
return []
return filter(len, map(strip,
self.sent_to_group_names.split(cfg['CFG_WEBMESSAGE_SEPARATOR'])))
#TODO consider moving following lines to separate file.
from invenio.modules.messages.config import CFG_WEBMESSAGE_EMAIL_ALERT
from invenio.config import CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL
from invenio.utils.date import datetext_format
from datetime import datetime
def email_alert(mapper, connection, target):
""" Sends email alerts to message recipients. """
from invenio.ext.template import render_template_to_string
from invenio.ext.email import send_email, scheduled_send_email
m = target
is_reminder = m.received_date is not None \
and m.received_date > datetime.now()
alert = send_email
if is_reminder:
alert = lambda *args, **kwargs: scheduled_send_email(*args,
other_bibtasklet_arguments=[
m.received_date.strftime(datetext_format)],
**kwargs)
for u in m.recipients:
if isinstance(u.settings, dict) and \
u.settings.get('webmessage_email_alert', True):
try:
alert(
cfg['CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL'],
u.email,
subject = m.subject,
content = render_template_to_string(
'messages/email_alert.html',
message=m, user=u))
except:
# FIXME tests are not in request context
pass
# Registration of email_alert invoked from blueprint
# in order to use before_app_first_request.
# Reading config CFG_WEBMESSAGE_EMAIL_ALERT
# required app context.
def email_alert_register():
if cfg['CFG_WEBMESSAGE_EMAIL_ALERT']:
from sqlalchemy import event
# Register after insert callback.
event.listen(MsgMESSAGE, 'after_insert', email_alert)
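# Illustrative sketch (hypothetical wiring, not from the original file): the
# helper above is expected to be called from a blueprint so that the app
# config is available, e.g.
#   @blueprint.before_app_first_request
#   def register_webmessage_alerts():
#       email_alert_register()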
class UserMsgMESSAGE(db.Model):
"""Represents a UserMsgMESSAGE record."""
__tablename__ = 'user_msgMESSAGE'
id_user_to
|
oliverhr/odoo | addons/stock/stock.py | Python | agpl-3.0 | 269,349 | 0.005732
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from dateutil import relativedelta
import json
import time
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare, float_round
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp.exceptions import Warning
from openerp import SUPERUSER_ID, api
import openerp.addons.decimal_precision as dp
from openerp.addons.procurement import procurement
import logging
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
'name': fields.char('Name', required=True, help="Incoterms are series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."),
}
_defaults = {
'active': True,
}
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Inventory Locations"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
_rec_name = 'complete_name'
def _location_owner(self, cr, uid, location, context=None):
''' Return the company owning the location if any '''
return location and (location.usage == 'internal') and location.company_id or False
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.location_id
while parent:
res[m.id] = parent.name + ' / ' + res[m.id]
parent = parent.location_id
return res
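# Illustrative note (not in the original source): for a hypothetical chain of
# locations Physical Locations -> WH -> Stock, the loop above walks up the
# parents and yields 'Physical Locations / WH / Stock' for the innermost record.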
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
if context is None:
context = {}
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive)
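# Illustrative note (not from the original file): copying the context with
# active_test=False disables the default "active" filter, so the child_of
# search also returns archived sub-locations of the given locations.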
def _name_get(self, cr, uid, location, context=None):
name = location.name
while location.location_id and location.usage != 'view':
location = location.location_id
name = location.name + '/' + name
return name
def name_get(self, cr, uid, ids, context=None):
res = []
for location in self.browse(cr, uid, ids, context=context):
res.append((location.id, self._name_get(cr, uid, location, context=context)))
return res
_columns = {
'name': fields.char('Location Name', required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([
('supplier', 'Supplier Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')],
'Location Type', required=True,
help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
\n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations
""", select=True),
'complete_name': fields.function(_complete_name, type='char', string="Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between companies'),
'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the
|